2024-11-20 15:21:05,282 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9
2024-11-20 15:21:05,294 main DEBUG Took 0.009484 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-20 15:21:05,294 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-20 15:21:05,294 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-20 15:21:05,295 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-20 15:21:05,296 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 15:21:05,303 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-20 15:21:05,315 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 15:21:05,316 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 15:21:05,317 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 15:21:05,318 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 15:21:05,318 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 15:21:05,319 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 15:21:05,320 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 15:21:05,320 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 15:21:05,321 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 15:21:05,321 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 15:21:05,323 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 15:21:05,323 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 15:21:05,324 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 15:21:05,324 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 15:21:05,325 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 15:21:05,325 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 15:21:05,326 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 15:21:05,326 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 15:21:05,327 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 15:21:05,327 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 15:21:05,328 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 15:21:05,328 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 15:21:05,329 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 15:21:05,329 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-20 15:21:05,330 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 15:21:05,331 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-20 15:21:05,332 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-20 15:21:05,334 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-20 15:21:05,337 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-20 15:21:05,337 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-20 15:21:05,339 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-20 15:21:05,339 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-20 15:21:05,351 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-20 15:21:05,353 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-20 15:21:05,355 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-20 15:21:05,356 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-20 15:21:05,356 main DEBUG createAppenders(={Console})
2024-11-20 15:21:05,357 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized
2024-11-20 15:21:05,357 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9
2024-11-20 15:21:05,357 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK.
2024-11-20 15:21:05,358 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-20 15:21:05,359 main DEBUG OutputStream closed
2024-11-20 15:21:05,359 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-20 15:21:05,359 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-20 15:21:05,360 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK
2024-11-20 15:21:05,444 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-20 15:21:05,447 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-20 15:21:05,448 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-20 15:21:05,449 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-20 15:21:05,450 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-20 15:21:05,450 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-20 15:21:05,451 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-20 15:21:05,451 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-20 15:21:05,452 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-20 15:21:05,452 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-20 15:21:05,453 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-20 15:21:05,453 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-20 15:21:05,454 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-20 15:21:05,454 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-20 15:21:05,455 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-20 15:21:05,455 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-20 15:21:05,456 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-20 15:21:05,457 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-20 15:21:05,459 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-20 15:21:05,459 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null
2024-11-20 15:21:05,459 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-20 15:21:05,460 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK.
2024-11-20T15:21:05,695 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3
2024-11-20 15:21:05,698 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-20 15:21:05,699 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-20T15:21:05,707 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy timeout: 13 mins
2024-11-20T15:21:05,731 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-20T15:21:05,734 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/cluster_20e01121-862a-f127-22c7-564b6246e54a, deleteOnExit=true
2024-11-20T15:21:05,734 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-11-20T15:21:05,735 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/test.cache.data in system properties and HBase conf
2024-11-20T15:21:05,735 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/hadoop.tmp.dir in system properties and HBase conf
2024-11-20T15:21:05,736 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/hadoop.log.dir in system properties and HBase conf
2024-11-20T15:21:05,737 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-20T15:21:05,737 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-20T15:21:05,737 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-11-20T15:21:05,845 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-20T15:21:05,942 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-20T15:21:05,946 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-20T15:21:05,946 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-20T15:21:05,947 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-20T15:21:05,947 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-20T15:21:05,948 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-20T15:21:05,948 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-20T15:21:05,948 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-20T15:21:05,949 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-20T15:21:05,949 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-20T15:21:05,949 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/nfs.dump.dir in system properties and HBase conf
2024-11-20T15:21:05,950 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/java.io.tmpdir in system properties and HBase conf
2024-11-20T15:21:05,950 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-20T15:21:05,950 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-20T15:21:05,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-20T15:21:06,751 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-20T15:21:06,834 INFO [Time-limited test {}] log.Log(170): Logging initialized @2246ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-20T15:21:06,913 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T15:21:06,977 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T15:21:06,998 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T15:21:06,998 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T15:21:06,999 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-20T15:21:07,013 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T15:21:07,015 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/hadoop.log.dir/,AVAILABLE}
2024-11-20T15:21:07,016 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T15:21:07,215 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/java.io.tmpdir/jetty-localhost-40237-hadoop-hdfs-3_4_1-tests_jar-_-any-828511780617982028/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T15:21:07,222 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:40237}
2024-11-20T15:21:07,223 INFO [Time-limited test {}] server.Server(415): Started @2635ms
2024-11-20T15:21:07,634 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-20T15:21:07,641 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-20T15:21:07,642 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-20T15:21:07,642 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-20T15:21:07,642 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-20T15:21:07,643 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/hadoop.log.dir/,AVAILABLE}
2024-11-20T15:21:07,644 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-20T15:21:07,762 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/java.io.tmpdir/jetty-localhost-40611-hadoop-hdfs-3_4_1-tests_jar-_-any-10772963300809599459/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T15:21:07,763 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:40611}
2024-11-20T15:21:07,763 INFO [Time-limited test {}] server.Server(415): Started @3175ms
2024-11-20T15:21:07,819 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-20T15:21:08,234 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/cluster_20e01121-862a-f127-22c7-564b6246e54a/dfs/data/data2/current/BP-1888144914-172.17.0.2-1732116066515/current, will proceed with Du for space computation calculation,
2024-11-20T15:21:08,234 WARN [Thread-71 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/cluster_20e01121-862a-f127-22c7-564b6246e54a/dfs/data/data1/current/BP-1888144914-172.17.0.2-1732116066515/current, will proceed with Du for space computation calculation,
2024-11-20T15:21:08,281 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-20T15:21:08,335 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf8f2e39400d77add with lease ID 0x487077313342a455: Processing first storage report for DS-d64d27ac-7176-46be-b11a-20641e17b60e from datanode DatanodeRegistration(127.0.0.1:35761, datanodeUuid=e1101302-ca42-4c36-926f-c76325d0f3d3, infoPort=38795, infoSecurePort=0, ipcPort=36087, storageInfo=lv=-57;cid=testClusterID;nsid=767006008;c=1732116066515)
2024-11-20T15:21:08,336 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf8f2e39400d77add with lease ID 0x487077313342a455: from storage DS-d64d27ac-7176-46be-b11a-20641e17b60e node DatanodeRegistration(127.0.0.1:35761, datanodeUuid=e1101302-ca42-4c36-926f-c76325d0f3d3, infoPort=38795, infoSecurePort=0, ipcPort=36087, storageInfo=lv=-57;cid=testClusterID;nsid=767006008;c=1732116066515), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-20T15:21:08,337 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf8f2e39400d77add with lease ID 0x487077313342a455: Processing first storage report for DS-f2fdaa71-c8ad-4c54-b4e4-01c4329b1638 from datanode DatanodeRegistration(127.0.0.1:35761, datanodeUuid=e1101302-ca42-4c36-926f-c76325d0f3d3, infoPort=38795, infoSecurePort=0, ipcPort=36087, storageInfo=lv=-57;cid=testClusterID;nsid=767006008;c=1732116066515)
2024-11-20T15:21:08,337 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf8f2e39400d77add with lease ID 0x487077313342a455: from storage DS-f2fdaa71-c8ad-4c54-b4e4-01c4329b1638 node DatanodeRegistration(127.0.0.1:35761, datanodeUuid=e1101302-ca42-4c36-926f-c76325d0f3d3, infoPort=38795, infoSecurePort=0, ipcPort=36087, storageInfo=lv=-57;cid=testClusterID;nsid=767006008;c=1732116066515), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-20T15:21:08,390 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3
2024-11-20T15:21:08,468 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/cluster_20e01121-862a-f127-22c7-564b6246e54a/zookeeper_0, clientPort=62338, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/cluster_20e01121-862a-f127-22c7-564b6246e54a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/cluster_20e01121-862a-f127-22c7-564b6246e54a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-20T15:21:08,478 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=62338
2024-11-20T15:21:08,491 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T15:21:08,495 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T15:21:08,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741825_1001 (size=7)
2024-11-20T15:21:09,127 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3 with version=8
2024-11-20T15:21:09,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/hbase-staging
2024-11-20T15:21:09,253 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-20T15:21:09,517 INFO [Time-limited test {}] client.ConnectionUtils(129): master/0b62285ead89:0 server-side Connection retries=45
2024-11-20T15:21:09,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T15:21:09,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-20T15:21:09,536 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-20T15:21:09,537 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T15:21:09,537 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-20T15:21:09,667 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-20T15:21:09,725 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-20T15:21:09,733 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-20T15:21:09,737 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-20T15:21:09,764 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 20235 (auto-detected)
2024-11-20T15:21:09,765 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-20T15:21:09,784 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46835
2024-11-20T15:21:09,791 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T15:21:09,794 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T15:21:09,806 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:46835 connecting to ZooKeeper ensemble=127.0.0.1:62338
2024-11-20T15:21:09,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:468350x0, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-20T15:21:09,839 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46835-0x100158571480000 connected
2024-11-20T15:21:09,867 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-20T15:21:09,870 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T15:21:09,873 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-20T15:21:09,878 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46835
2024-11-20T15:21:09,880 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46835
2024-11-20T15:21:09,880 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46835
2024-11-20T15:21:09,882 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46835
2024-11-20T15:21:09,883 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46835
2024-11-20T15:21:09,890 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3, hbase.cluster.distributed=false
2024-11-20T15:21:09,955 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/0b62285ead89:0 server-side Connection retries=45
2024-11-20T15:21:09,955 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T15:21:09,955 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-20T15:21:09,955 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-20T15:21:09,955 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-20T15:21:09,956 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-20T15:21:09,958 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-20T15:21:09,960 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-20T15:21:09,961 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33387
2024-11-20T15:21:09,963 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-20T15:21:09,968 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-20T15:21:09,969 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T15:21:09,972 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T15:21:09,976 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:33387 connecting to ZooKeeper ensemble=127.0.0.1:62338
2024-11-20T15:21:09,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:333870x0, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-20T15:21:09,981 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:333870x0, quorum=127.0.0.1:62338, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-20T15:21:09,981 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33387-0x100158571480001 connected
2024-11-20T15:21:09,983 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-20T15:21:09,984 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-20T15:21:09,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33387
2024-11-20T15:21:09,985 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33387
2024-11-20T15:21:09,985 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33387
2024-11-20T15:21:09,986 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33387
2024-11-20T15:21:09,986 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33387
2024-11-20T15:21:09,988 INFO [master/0b62285ead89:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/0b62285ead89,46835,1732116069247
2024-11-20T15:21:09,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T15:21:09,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T15:21:10,000 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0b62285ead89,46835,1732116069247
2024-11-20T15:21:10,004 DEBUG [M:0;0b62285ead89:46835 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0b62285ead89:46835
2024-11-20T15:21:10,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-20T15:21:10,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-20T15:21:10,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T15:21:10,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T15:21:10,026 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-20T15:21:10,027 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-20T15:21:10,027 INFO [master/0b62285ead89:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0b62285ead89,46835,1732116069247 from backup master directory
2024-11-20T15:21:10,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0b62285ead89,46835,1732116069247
2024-11-20T15:21:10,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T15:21:10,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-20T15:21:10,032 WARN [master/0b62285ead89:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-20T15:21:10,033 INFO [master/0b62285ead89:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0b62285ead89,46835,1732116069247
2024-11-20T15:21:10,035 INFO [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-11-20T15:21:10,036 INFO [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-11-20T15:21:10,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741826_1002 (size=42)
2024-11-20T15:21:10,506 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/hbase.id with ID: f1dedfb5-6497-4bc3-badc-f75ee5465ad3
2024-11-20T15:21:10,546 INFO [master/0b62285ead89:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T15:21:10,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T15:21:10,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T15:21:10,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741827_1003 (size=196)
2024-11-20T15:21:11,005 INFO [master/0b62285ead89:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-20T15:21:11,007 INFO [master/0b62285ead89:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-20T15:21:11,026 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T15:21:11,030 INFO [master/0b62285ead89:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-20T15:21:11,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741828_1004 (size=1189)
2024-11-20T15:21:11,487 INFO [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store
2024-11-20T15:21:11,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741829_1005 (size=34)
2024-11-20T15:21:11,908 INFO [master/0b62285ead89:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-11-20T15:21:11,908 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T15:21:11,909 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-20T15:21:11,910 INFO [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T15:21:11,910 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T15:21:11,910 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-20T15:21:11,910 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T15:21:11,910 INFO [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T15:21:11,910 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-20T15:21:11,912 WARN [master/0b62285ead89:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/.initializing
2024-11-20T15:21:11,913 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/WALs/0b62285ead89,46835,1732116069247
2024-11-20T15:21:11,919 INFO [master/0b62285ead89:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-20T15:21:11,930 INFO [master/0b62285ead89:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0b62285ead89%2C46835%2C1732116069247, suffix=, logDir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/WALs/0b62285ead89,46835,1732116069247, archiveDir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/oldWALs, maxLogs=10
2024-11-20T15:21:11,952 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/WALs/0b62285ead89,46835,1732116069247/0b62285ead89%2C46835%2C1732116069247.1732116071934, exclude list is [], retry=0
2024-11-20T15:21:11,969 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35761,DS-d64d27ac-7176-46be-b11a-20641e17b60e,DISK]
2024-11-20T15:21:11,972 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf.
2024-11-20T15:21:12,008 INFO [master/0b62285ead89:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/WALs/0b62285ead89,46835,1732116069247/0b62285ead89%2C46835%2C1732116069247.1732116071934 2024-11-20T15:21:12,009 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38795:38795)] 2024-11-20T15:21:12,009 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T15:21:12,010 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:21:12,013 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T15:21:12,014 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T15:21:12,056 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T15:21:12,080 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T15:21:12,084 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:12,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T15:21:12,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T15:21:12,090 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T15:21:12,091 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:12,092 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:21:12,092 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T15:21:12,095 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T15:21:12,095 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:12,096 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:21:12,096 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T15:21:12,099 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T15:21:12,099 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:12,100 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:21:12,105 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T15:21:12,106 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T15:21:12,114 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T15:21:12,119 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T15:21:12,124 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T15:21:12,125 INFO [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65326796, jitterRate=-0.026554882526397705}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T15:21:12,131 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T15:21:12,133 INFO [master/0b62285ead89:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T15:21:12,162 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e262798, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:12,197 INFO [master/0b62285ead89:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
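The CompactionConfiguration lines above (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2) and the FlushLargeStoresPolicy fallback (128 MB flush size divided by the 4 column families = 32 MB) come from standard store settings. A small illustrative sketch reading those knobs from a Configuration follows; the key names are the common hbase.hstore.compaction.* and hbase.hregion.memstore.flush.size settings, the class name is made up, and the division by 4 is only an assumption matching this particular master store layout.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
        int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
        float ratio = conf.getFloat("hbase.hstore.compaction.ratio", 1.2f);
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
        // The 32 MB per-family lower bound in the log is flushSize / number of families.
        long perFamilyLowerBound = flushSize / 4;
        System.out.printf("minFiles=%d maxFiles=%d ratio=%.1f perFamilyLowerBound=%d%n",
            minFiles, maxFiles, ratio, perFamilyLowerBound);
      }
    }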
2024-11-20T15:21:12,211 INFO [master/0b62285ead89:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T15:21:12,211 INFO [master/0b62285ead89:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T15:21:12,214 INFO [master/0b62285ead89:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T15:21:12,216 INFO [master/0b62285ead89:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-20T15:21:12,221 INFO [master/0b62285ead89:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-11-20T15:21:12,221 INFO [master/0b62285ead89:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T15:21:12,253 INFO [master/0b62285ead89:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T15:21:12,269 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T15:21:12,272 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-20T15:21:12,274 INFO [master/0b62285ead89:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T15:21:12,275 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T15:21:12,277 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-20T15:21:12,279 INFO [master/0b62285ead89:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T15:21:12,283 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T15:21:12,284 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-20T15:21:12,285 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T15:21:12,287 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T15:21:12,297 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46835-0x100158571480000, quorum=127.0.0.1:62338, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T15:21:12,299 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T15:21:12,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T15:21:12,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T15:21:12,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T15:21:12,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T15:21:12,303 INFO [master/0b62285ead89:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=0b62285ead89,46835,1732116069247, sessionid=0x100158571480000, setting cluster-up flag (Was=false) 2024-11-20T15:21:12,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T15:21:12,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T15:21:12,322 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T15:21:12,324 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0b62285ead89,46835,1732116069247 2024-11-20T15:21:12,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T15:21:12,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T15:21:12,335 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T15:21:12,337 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0b62285ead89,46835,1732116069247 2024-11-20T15:21:12,402 DEBUG [RS:0;0b62285ead89:33387 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0b62285ead89:33387 2024-11-20T15:21:12,403 INFO 
[RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(1008): ClusterId : f1dedfb5-6497-4bc3-badc-f75ee5465ad3 2024-11-20T15:21:12,406 DEBUG [RS:0;0b62285ead89:33387 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T15:21:12,410 DEBUG [RS:0;0b62285ead89:33387 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T15:21:12,411 DEBUG [RS:0;0b62285ead89:33387 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T15:21:12,413 DEBUG [RS:0;0b62285ead89:33387 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T15:21:12,414 DEBUG [RS:0;0b62285ead89:33387 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27a9c593, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:12,416 DEBUG [RS:0;0b62285ead89:33387 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1594605a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0b62285ead89/172.17.0.2:0 2024-11-20T15:21:12,418 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-20T15:21:12,419 INFO [RS:0;0b62285ead89:33387 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-20T15:21:12,419 INFO [RS:0;0b62285ead89:33387 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-20T15:21:12,419 DEBUG [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-20T15:21:12,421 INFO [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(3073): reportForDuty to master=0b62285ead89,46835,1732116069247 with isa=0b62285ead89/172.17.0.2:33387, startcode=1732116069954 2024-11-20T15:21:12,424 INFO [master/0b62285ead89:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-20T15:21:12,427 INFO [master/0b62285ead89:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
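The ZKUtil(444) entries above only report that optional state znodes such as /hbase/balancer and /hbase/normalizer do not exist yet under the base znode. A bare-bones ZooKeeper sketch of the same kind of existence check is shown below, assuming the quorum address printed in the log (127.0.0.1:62338) and an arbitrary session timeout; HBase itself goes through ZKUtil/RecoverableZooKeeper rather than the raw client used here, and the class name is made up.

    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeProbeSketch {
      public static void main(String[] args) throws Exception {
        // Quorum from the log; 30s session timeout chosen arbitrarily for the sketch.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62338", 30000, event -> { });
        Stat stat = zk.exists("/hbase/balancer", false);
        // A null Stat means the znode is absent, which the log calls
        // "not necessarily an error".
        System.out.println("/hbase/balancer " + (stat == null ? "absent" : "present"));
        zk.close();
      }
    }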
2024-11-20T15:21:12,433 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0b62285ead89,46835,1732116069247 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T15:21:12,435 DEBUG [RS:0;0b62285ead89:33387 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T15:21:12,437 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0b62285ead89:0, corePoolSize=5, maxPoolSize=5 2024-11-20T15:21:12,437 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0b62285ead89:0, corePoolSize=5, maxPoolSize=5 2024-11-20T15:21:12,437 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0b62285ead89:0, corePoolSize=5, maxPoolSize=5 2024-11-20T15:21:12,437 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0b62285ead89:0, corePoolSize=5, maxPoolSize=5 2024-11-20T15:21:12,437 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0b62285ead89:0, corePoolSize=10, maxPoolSize=10 2024-11-20T15:21:12,437 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0b62285ead89:0, corePoolSize=1, maxPoolSize=1 2024-11-20T15:21:12,438 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0b62285ead89:0, corePoolSize=2, maxPoolSize=2 2024-11-20T15:21:12,438 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0b62285ead89:0, corePoolSize=1, maxPoolSize=1 2024-11-20T15:21:12,441 INFO [master/0b62285ead89:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732116102441 2024-11-20T15:21:12,443 INFO [master/0b62285ead89:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T15:21:12,443 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T15:21:12,444 INFO [master/0b62285ead89:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T15:21:12,444 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-20T15:21:12,447 INFO [master/0b62285ead89:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T15:21:12,448 INFO [master/0b62285ead89:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T15:21:12,448 INFO [master/0b62285ead89:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T15:21:12,448 INFO [master/0b62285ead89:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T15:21:12,449 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:12,449 INFO [master/0b62285ead89:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T15:21:12,449 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T15:21:12,450 INFO [master/0b62285ead89:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T15:21:12,451 INFO [master/0b62285ead89:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T15:21:12,451 INFO [master/0b62285ead89:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T15:21:12,454 INFO [master/0b62285ead89:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T15:21:12,454 INFO [master/0b62285ead89:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T15:21:12,456 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0b62285ead89:0:becomeActiveMaster-HFileCleaner.large.0-1732116072455,5,FailOnTimeoutGroup] 2024-11-20T15:21:12,457 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0b62285ead89:0:becomeActiveMaster-HFileCleaner.small.0-1732116072456,5,FailOnTimeoutGroup] 2024-11-20T15:21:12,458 INFO [master/0b62285ead89:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T15:21:12,458 INFO [master/0b62285ead89:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T15:21:12,459 INFO [master/0b62285ead89:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T15:21:12,460 INFO [master/0b62285ead89:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T15:21:12,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741831_1007 (size=1039) 2024-11-20T15:21:12,473 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58221, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T15:21:12,479 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46835 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 0b62285ead89,33387,1732116069954 2024-11-20T15:21:12,481 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46835 {}] master.ServerManager(486): Registering regionserver=0b62285ead89,33387,1732116069954 2024-11-20T15:21:12,495 DEBUG [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3 2024-11-20T15:21:12,495 DEBUG [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:40559 2024-11-20T15:21:12,495 DEBUG [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-20T15:21:12,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T15:21:12,500 DEBUG [RS:0;0b62285ead89:33387 {}] zookeeper.ZKUtil(111): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0b62285ead89,33387,1732116069954 2024-11-20T15:21:12,501 WARN [RS:0;0b62285ead89:33387 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
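The ChoreService entries above register periodic background tasks (LogsCleaner every 600000 ms, then HFileCleaner, ReplicationBarrierCleaner and SnapshotCleaner). The fixed-period pattern can be illustrated with a plain JDK scheduler as below; this is only an analogy for the chore mechanism, not HBase's ScheduledChore/ChoreService API, and the class name is made up.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreLikeSketch {
      public static void main(String[] args) {
        ScheduledExecutorService ses = Executors.newSingleThreadScheduledExecutor();
        // period=600000, unit=MILLISECONDS mirrors the LogsCleaner chore in the log.
        ses.scheduleAtFixedRate(
            () -> System.out.println("cleaner pass"),
            0, 600000, TimeUnit.MILLISECONDS);
        // A real service would keep a handle and call ses.shutdown() on stop.
      }
    }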
2024-11-20T15:21:12,501 INFO [RS:0;0b62285ead89:33387 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T15:21:12,501 DEBUG [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/WALs/0b62285ead89,33387,1732116069954 2024-11-20T15:21:12,503 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0b62285ead89,33387,1732116069954] 2024-11-20T15:21:12,514 DEBUG [RS:0;0b62285ead89:33387 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-20T15:21:12,525 INFO [RS:0;0b62285ead89:33387 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T15:21:12,537 INFO [RS:0;0b62285ead89:33387 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T15:21:12,540 INFO [RS:0;0b62285ead89:33387 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T15:21:12,540 INFO [RS:0;0b62285ead89:33387 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T15:21:12,541 INFO [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-20T15:21:12,548 INFO [RS:0;0b62285ead89:33387 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
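The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the limit, alongside compaction throughput bounds of 50-100 MB/s. A small sketch of the fraction-based settings behind the global limit follows; it assumes the standard hbase.regionserver.global.memstore.size keys, the class name is made up, and the absolute values depend on the test JVM's heap, which the log does not show.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        float globalFraction = conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f);
        float lowerLimit = conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        long heapBytes = Runtime.getRuntime().maxMemory();
        long globalLimit = (long) (heapBytes * globalFraction);
        // 836 M vs 880 M in the log is exactly the 0.95 low-water mark applied to the limit.
        long lowMark = (long) (globalLimit * lowerLimit);
        System.out.println("globalMemStoreLimit=" + globalLimit + " lowMark=" + lowMark);
      }
    }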
2024-11-20T15:21:12,548 DEBUG [RS:0;0b62285ead89:33387 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0b62285ead89:0, corePoolSize=1, maxPoolSize=1 2024-11-20T15:21:12,549 DEBUG [RS:0;0b62285ead89:33387 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0b62285ead89:0, corePoolSize=1, maxPoolSize=1 2024-11-20T15:21:12,549 DEBUG [RS:0;0b62285ead89:33387 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0b62285ead89:0, corePoolSize=1, maxPoolSize=1 2024-11-20T15:21:12,549 DEBUG [RS:0;0b62285ead89:33387 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0b62285ead89:0, corePoolSize=1, maxPoolSize=1 2024-11-20T15:21:12,549 DEBUG [RS:0;0b62285ead89:33387 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0b62285ead89:0, corePoolSize=1, maxPoolSize=1 2024-11-20T15:21:12,549 DEBUG [RS:0;0b62285ead89:33387 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0b62285ead89:0, corePoolSize=2, maxPoolSize=2 2024-11-20T15:21:12,549 DEBUG [RS:0;0b62285ead89:33387 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0, corePoolSize=1, maxPoolSize=1 2024-11-20T15:21:12,549 DEBUG [RS:0;0b62285ead89:33387 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0b62285ead89:0, corePoolSize=1, maxPoolSize=1 2024-11-20T15:21:12,549 DEBUG [RS:0;0b62285ead89:33387 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0b62285ead89:0, corePoolSize=1, maxPoolSize=1 2024-11-20T15:21:12,550 DEBUG [RS:0;0b62285ead89:33387 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0b62285ead89:0, corePoolSize=1, maxPoolSize=1 2024-11-20T15:21:12,550 DEBUG [RS:0;0b62285ead89:33387 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0b62285ead89:0, corePoolSize=1, maxPoolSize=1 2024-11-20T15:21:12,550 DEBUG [RS:0;0b62285ead89:33387 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0b62285ead89:0, corePoolSize=3, maxPoolSize=3 2024-11-20T15:21:12,550 DEBUG [RS:0;0b62285ead89:33387 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0, corePoolSize=3, maxPoolSize=3 2024-11-20T15:21:12,551 INFO [RS:0;0b62285ead89:33387 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T15:21:12,551 INFO [RS:0;0b62285ead89:33387 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T15:21:12,551 INFO [RS:0;0b62285ead89:33387 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T15:21:12,551 INFO [RS:0;0b62285ead89:33387 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T15:21:12,551 INFO [RS:0;0b62285ead89:33387 {}] hbase.ChoreService(168): Chore ScheduledChore name=0b62285ead89,33387,1732116069954-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
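The ExecutorService(95) entries above start one bounded pool per operation type (e.g. RS_OPEN_REGION with corePoolSize=1/maxPoolSize=1, RS_LOG_REPLAY_OPS with 2/2). The pattern of core threads that may time out plus a hard maximum maps onto a plain ThreadPoolExecutor, sketched below as an analogy only; it does not use HBase's internal executor classes and the class name is made up.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class BoundedPoolSketch {
      public static void main(String[] args) {
        // corePoolSize=2, maxPoolSize=2 mirrors e.g. RS_LOG_REPLAY_OPS above.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            2, 2, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.allowCoreThreadTimeOut(true); // matches allowCoreThreadTimeOut=true in the log
        pool.execute(() -> System.out.println("replay task"));
        pool.shutdown();
      }
    }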
2024-11-20T15:21:12,576 INFO [RS:0;0b62285ead89:33387 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T15:21:12,579 INFO [RS:0;0b62285ead89:33387 {}] hbase.ChoreService(168): Chore ScheduledChore name=0b62285ead89,33387,1732116069954-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T15:21:12,603 INFO [RS:0;0b62285ead89:33387 {}] regionserver.Replication(204): 0b62285ead89,33387,1732116069954 started 2024-11-20T15:21:12,603 INFO [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(1767): Serving as 0b62285ead89,33387,1732116069954, RpcServer on 0b62285ead89/172.17.0.2:33387, sessionid=0x100158571480001 2024-11-20T15:21:12,604 DEBUG [RS:0;0b62285ead89:33387 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T15:21:12,604 DEBUG [RS:0;0b62285ead89:33387 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0b62285ead89,33387,1732116069954 2024-11-20T15:21:12,605 DEBUG [RS:0;0b62285ead89:33387 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0b62285ead89,33387,1732116069954' 2024-11-20T15:21:12,605 DEBUG [RS:0;0b62285ead89:33387 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T15:21:12,606 DEBUG [RS:0;0b62285ead89:33387 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T15:21:12,606 DEBUG [RS:0;0b62285ead89:33387 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T15:21:12,606 DEBUG [RS:0;0b62285ead89:33387 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T15:21:12,607 DEBUG [RS:0;0b62285ead89:33387 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0b62285ead89,33387,1732116069954 2024-11-20T15:21:12,607 DEBUG [RS:0;0b62285ead89:33387 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0b62285ead89,33387,1732116069954' 2024-11-20T15:21:12,607 DEBUG [RS:0;0b62285ead89:33387 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T15:21:12,607 DEBUG [RS:0;0b62285ead89:33387 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T15:21:12,608 DEBUG [RS:0;0b62285ead89:33387 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T15:21:12,608 INFO [RS:0;0b62285ead89:33387 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T15:21:12,608 INFO [RS:0;0b62285ead89:33387 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
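The last two entries above note that quota support is disabled, which is the default; the region server therefore skips the space quota manager. A tiny configuration sketch around the standard hbase.quota.enabled switch follows, purely for illustration (the class name is made up and no other client-side action is implied by the log).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitchSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Off by default, which is why the log prints "Quota support disabled".
        boolean enabled = conf.getBoolean("hbase.quota.enabled", false);
        System.out.println("hbase.quota.enabled=" + enabled);
      }
    }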
2024-11-20T15:21:12,714 INFO [RS:0;0b62285ead89:33387 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-20T15:21:12,717 INFO [RS:0;0b62285ead89:33387 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0b62285ead89%2C33387%2C1732116069954, suffix=, logDir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/WALs/0b62285ead89,33387,1732116069954, archiveDir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/oldWALs, maxLogs=32 2024-11-20T15:21:12,734 DEBUG [RS:0;0b62285ead89:33387 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/WALs/0b62285ead89,33387,1732116069954/0b62285ead89%2C33387%2C1732116069954.1732116072720, exclude list is [], retry=0 2024-11-20T15:21:12,739 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35761,DS-d64d27ac-7176-46be-b11a-20641e17b60e,DISK] 2024-11-20T15:21:12,743 INFO [RS:0;0b62285ead89:33387 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/WALs/0b62285ead89,33387,1732116069954/0b62285ead89%2C33387%2C1732116069954.1732116072720 2024-11-20T15:21:12,743 DEBUG [RS:0;0b62285ead89:33387 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38795:38795)] 2024-11-20T15:21:12,867 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-20T15:21:12,868 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3 2024-11-20T15:21:12,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741833_1009 (size=32) 2024-11-20T15:21:13,279 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:21:13,282 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T15:21:13,285 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T15:21:13,285 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:13,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T15:21:13,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T15:21:13,288 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T15:21:13,289 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:13,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T15:21:13,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T15:21:13,292 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T15:21:13,292 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:13,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T15:21:13,295 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740 2024-11-20T15:21:13,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740 2024-11-20T15:21:13,299 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
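FSTableDescriptors(133) above writes the hbase:meta descriptor with three families (info, rep_barrier, table), ROWCOL bloom filters, ROW_INDEX_V1 block encoding, in-memory caching, and an 8 KB block size for info and table. A user table with similar per-family settings could be described with the public builder API roughly as below; the table name is hypothetical and this does not recreate hbase:meta itself.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family settings reported for hbase:meta in the log.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table")) // hypothetical table name
            .setColumnFamily(info)
            .build();
        System.out.println(td);
      }
    }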
2024-11-20T15:21:13,302 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T15:21:13,306 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T15:21:13,307 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66248261, jitterRate=-0.012823984026908875}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T15:21:13,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T15:21:13,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T15:21:13,310 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T15:21:13,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T15:21:13,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T15:21:13,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T15:21:13,311 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T15:21:13,311 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T15:21:13,315 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T15:21:13,315 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-20T15:21:13,321 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T15:21:13,329 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T15:21:13,331 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T15:21:13,483 DEBUG [0b62285ead89:46835 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T15:21:13,488 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:21:13,493 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0b62285ead89,33387,1732116069954, state=OPENING 2024-11-20T15:21:13,498 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T15:21:13,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T15:21:13,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T15:21:13,501 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T15:21:13,501 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T15:21:13,502 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:21:13,676 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:13,678 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T15:21:13,681 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45764, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T15:21:13,692 INFO [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-20T15:21:13,693 INFO [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T15:21:13,693 INFO [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-20T15:21:13,696 INFO [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0b62285ead89%2C33387%2C1732116069954.meta, suffix=.meta, logDir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/WALs/0b62285ead89,33387,1732116069954, archiveDir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/oldWALs, maxLogs=32 2024-11-20T15:21:13,713 DEBUG [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/WALs/0b62285ead89,33387,1732116069954/0b62285ead89%2C33387%2C1732116069954.meta.1732116073698.meta, exclude list is [], retry=0 2024-11-20T15:21:13,717 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35761,DS-d64d27ac-7176-46be-b11a-20641e17b60e,DISK] 2024-11-20T15:21:13,719 INFO [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/WALs/0b62285ead89,33387,1732116069954/0b62285ead89%2C33387%2C1732116069954.meta.1732116073698.meta 2024-11-20T15:21:13,720 DEBUG [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:38795:38795)] 2024-11-20T15:21:13,720 DEBUG [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T15:21:13,722 DEBUG [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T15:21:13,781 DEBUG [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T15:21:13,786 INFO [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T15:21:13,790 DEBUG [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T15:21:13,791 DEBUG [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:21:13,791 DEBUG [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-20T15:21:13,791 DEBUG [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-20T15:21:13,794 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T15:21:13,796 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T15:21:13,796 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:13,797 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T15:21:13,797 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T15:21:13,799 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T15:21:13,799 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:13,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T15:21:13,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T15:21:13,801 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T15:21:13,801 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:13,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T15:21:13,803 DEBUG [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740 2024-11-20T15:21:13,806 DEBUG [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740 2024-11-20T15:21:13,808 DEBUG [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T15:21:13,810 DEBUG [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T15:21:13,812 INFO [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69380290, jitterRate=0.033846884965896606}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T15:21:13,813 DEBUG [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T15:21:13,821 INFO [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732116073670 2024-11-20T15:21:13,832 DEBUG [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T15:21:13,833 INFO [RS_OPEN_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-20T15:21:13,834 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:21:13,836 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0b62285ead89,33387,1732116069954, state=OPEN 2024-11-20T15:21:13,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T15:21:13,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T15:21:13,840 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T15:21:13,840 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T15:21:13,844 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T15:21:13,845 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=0b62285ead89,33387,1732116069954 in 338 msec 2024-11-20T15:21:13,851 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T15:21:13,851 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 526 msec 2024-11-20T15:21:13,857 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.4840 sec 2024-11-20T15:21:13,858 INFO [master/0b62285ead89:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732116073858, completionTime=-1 2024-11-20T15:21:13,858 INFO [master/0b62285ead89:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T15:21:13,858 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-20T15:21:13,897 DEBUG [hconnection-0x10bb86e4-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:13,899 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45770, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:13,910 INFO [master/0b62285ead89:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-20T15:21:13,910 INFO [master/0b62285ead89:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732116133910 2024-11-20T15:21:13,910 INFO [master/0b62285ead89:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732116193910 2024-11-20T15:21:13,910 INFO [master/0b62285ead89:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 52 msec 2024-11-20T15:21:13,931 INFO [master/0b62285ead89:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0b62285ead89,46835,1732116069247-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T15:21:13,931 INFO [master/0b62285ead89:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0b62285ead89,46835,1732116069247-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T15:21:13,932 INFO [master/0b62285ead89:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0b62285ead89,46835,1732116069247-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T15:21:13,933 INFO [master/0b62285ead89:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0b62285ead89:46835, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T15:21:13,934 INFO [master/0b62285ead89:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T15:21:13,939 DEBUG [master/0b62285ead89:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-20T15:21:13,942 INFO [master/0b62285ead89:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
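The master startup records just above show several periodic chores being enabled (ClusterStatusChore, BalancerChore, RegionNormalizerChore, CatalogJanitor, HbckChore). The sketch below is a minimal, hypothetical illustration of how such a chore is defined and scheduled; ChoreService and ScheduledChore are HBase-internal classes, and the class name ChoreSketch plus the constructor signatures used here are assumptions based on the 2.x code base, not code taken from this test.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal stopper so the chore can be cancelled; real callers pass the server instance.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // Period is in milliseconds, matching "period=60000, unit=MILLISECONDS is enabled." above.
    ScheduledChore chore = new ScheduledChore("ExampleStatusChore", stopper, 60_000) {
      @Override protected void chore() {
        System.out.println("periodic work runs here");
      }
    };
    ChoreService service = new ChoreService("example");  // prefix for the pool's thread names
    service.scheduleChore(chore);                        // ChoreService logs "... is enabled."
    Thread.sleep(1_000);
    service.shutdown();
  }
}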
2024-11-20T15:21:13,943 INFO [master/0b62285ead89:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T15:21:13,949 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-20T15:21:13,952 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T15:21:13,953 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:13,955 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T15:21:13,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741835_1011 (size=358) 2024-11-20T15:21:14,370 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => dc4fbec7dbed2ad8a83ee5514bc45c4e, NAME => 'hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3 2024-11-20T15:21:14,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741836_1012 (size=42) 2024-11-20T15:21:14,781 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:21:14,781 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing dc4fbec7dbed2ad8a83ee5514bc45c4e, disabling compactions & flushes 2024-11-20T15:21:14,781 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e. 2024-11-20T15:21:14,781 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e. 2024-11-20T15:21:14,781 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e. 
after waiting 0 ms 2024-11-20T15:21:14,782 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e. 2024-11-20T15:21:14,782 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e. 2024-11-20T15:21:14,782 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for dc4fbec7dbed2ad8a83ee5514bc45c4e: 2024-11-20T15:21:14,784 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T15:21:14,791 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732116074785"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732116074785"}]},"ts":"1732116074785"} 2024-11-20T15:21:14,814 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T15:21:14,816 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T15:21:14,819 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116074816"}]},"ts":"1732116074816"} 2024-11-20T15:21:14,823 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-20T15:21:14,829 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=dc4fbec7dbed2ad8a83ee5514bc45c4e, ASSIGN}] 2024-11-20T15:21:14,831 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=dc4fbec7dbed2ad8a83ee5514bc45c4e, ASSIGN 2024-11-20T15:21:14,833 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=dc4fbec7dbed2ad8a83ee5514bc45c4e, ASSIGN; state=OFFLINE, location=0b62285ead89,33387,1732116069954; forceNewPlan=false, retain=false 2024-11-20T15:21:14,983 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=dc4fbec7dbed2ad8a83ee5514bc45c4e, regionState=OPENING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:21:14,987 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure dc4fbec7dbed2ad8a83ee5514bc45c4e, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:21:15,141 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:15,148 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e. 2024-11-20T15:21:15,148 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => dc4fbec7dbed2ad8a83ee5514bc45c4e, NAME => 'hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e.', STARTKEY => '', ENDKEY => ''} 2024-11-20T15:21:15,149 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace dc4fbec7dbed2ad8a83ee5514bc45c4e 2024-11-20T15:21:15,149 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:21:15,149 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for dc4fbec7dbed2ad8a83ee5514bc45c4e 2024-11-20T15:21:15,149 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for dc4fbec7dbed2ad8a83ee5514bc45c4e 2024-11-20T15:21:15,152 INFO [StoreOpener-dc4fbec7dbed2ad8a83ee5514bc45c4e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region dc4fbec7dbed2ad8a83ee5514bc45c4e 2024-11-20T15:21:15,155 INFO [StoreOpener-dc4fbec7dbed2ad8a83ee5514bc45c4e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dc4fbec7dbed2ad8a83ee5514bc45c4e columnFamilyName info 2024-11-20T15:21:15,155 DEBUG [StoreOpener-dc4fbec7dbed2ad8a83ee5514bc45c4e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:15,156 INFO [StoreOpener-dc4fbec7dbed2ad8a83ee5514bc45c4e-1 {}] regionserver.HStore(327): Store=dc4fbec7dbed2ad8a83ee5514bc45c4e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:21:15,157 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/namespace/dc4fbec7dbed2ad8a83ee5514bc45c4e 2024-11-20T15:21:15,158 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/namespace/dc4fbec7dbed2ad8a83ee5514bc45c4e 2024-11-20T15:21:15,161 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for dc4fbec7dbed2ad8a83ee5514bc45c4e 2024-11-20T15:21:15,165 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/namespace/dc4fbec7dbed2ad8a83ee5514bc45c4e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T15:21:15,166 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened dc4fbec7dbed2ad8a83ee5514bc45c4e; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72361155, jitterRate=0.0782652348279953}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T15:21:15,167 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for dc4fbec7dbed2ad8a83ee5514bc45c4e: 2024-11-20T15:21:15,170 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e., pid=6, masterSystemTime=1732116075141 2024-11-20T15:21:15,173 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e. 2024-11-20T15:21:15,173 INFO [RS_OPEN_PRIORITY_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e. 
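Once the hbase:namespace region above finishes its post-open deploy tasks, its location is published in hbase:meta and ZooKeeper, and a client can resolve it through the public RegionLocator API. The snippet below is only a sketch of that lookup, assuming an hbase-site.xml on the classpath that points at this cluster; the class name is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateNamespaceRegion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase:namespace"))) {
      // The empty row key falls into the single region with STARTKEY => '' and ENDKEY => ''.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true); // true = bypass cache
      System.out.println(loc.getServerName() + " serves " + loc);
    }
  }
}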
2024-11-20T15:21:15,174 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=dc4fbec7dbed2ad8a83ee5514bc45c4e, regionState=OPEN, openSeqNum=2, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:21:15,181 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T15:21:15,183 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure dc4fbec7dbed2ad8a83ee5514bc45c4e, server=0b62285ead89,33387,1732116069954 in 190 msec 2024-11-20T15:21:15,185 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T15:21:15,185 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=dc4fbec7dbed2ad8a83ee5514bc45c4e, ASSIGN in 352 msec 2024-11-20T15:21:15,186 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T15:21:15,187 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116075187"}]},"ts":"1732116075187"} 2024-11-20T15:21:15,189 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-20T15:21:15,193 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T15:21:15,196 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2490 sec 2024-11-20T15:21:15,254 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-20T15:21:15,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-20T15:21:15,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T15:21:15,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T15:21:15,283 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-20T15:21:15,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T15:21:15,304 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 24 msec 2024-11-20T15:21:15,307 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-20T15:21:15,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T15:21:15,323 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 15 msec 2024-11-20T15:21:15,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-20T15:21:15,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-20T15:21:15,336 INFO [master/0b62285ead89:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 5.303sec 2024-11-20T15:21:15,338 INFO [master/0b62285ead89:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T15:21:15,339 INFO [master/0b62285ead89:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T15:21:15,340 INFO [master/0b62285ead89:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T15:21:15,341 INFO [master/0b62285ead89:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T15:21:15,341 INFO [master/0b62285ead89:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T15:21:15,342 INFO [master/0b62285ead89:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0b62285ead89,46835,1732116069247-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T15:21:15,342 INFO [master/0b62285ead89:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0b62285ead89,46835,1732116069247-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T15:21:15,348 DEBUG [master/0b62285ead89:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-20T15:21:15,349 INFO [master/0b62285ead89:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T15:21:15,350 INFO [master/0b62285ead89:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0b62285ead89,46835,1732116069247-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
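The two CreateNamespaceProcedure runs above create the built-in 'default' and 'hbase' namespaces as part of master initialization. For reference, a client can list or add namespaces through the public Admin API; the sketch below assumes an open connection to this cluster, and 'example_ns' is a made-up name.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListNamespaces {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());  // expect at least: default, hbase
      }
      // Creating another namespace goes through the same master procedure machinery.
      admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
    }
  }
}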
2024-11-20T15:21:15,407 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e83c466 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39dee83f 2024-11-20T15:21:15,407 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-20T15:21:15,414 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67b8b597, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:15,417 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-20T15:21:15,418 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-20T15:21:15,429 DEBUG [hconnection-0x4c09ef46-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:15,439 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45780, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:15,448 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=0b62285ead89,46835,1732116069247 2024-11-20T15:21:15,466 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=223, ProcessCount=11, AvailableMemoryMB=6843 2024-11-20T15:21:15,477 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T15:21:15,480 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42508, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T15:21:15,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
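The TableDescriptorChecker warning above fires because the test deliberately sets a tiny 128 KB memstore flush size to force frequent flushes. Outside a test, the per-table MEMSTORE_FLUSHSIZE would normally be left at the default or set to something much larger through the table descriptor; the sketch below shows that knob with placeholder table and family names, and is not the test's own code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushSizeExample {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("ExampleTable"))
        .setMemStoreFlushSize(128L * 1024 * 1024)  // 128 MB instead of the 131072 bytes warned about
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("info")))
        .build();
    System.out.println("flush size = " + td.getMemStoreFlushSize());
  }
}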
2024-11-20T15:21:15,510 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T15:21:15,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T15:21:15,516 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T15:21:15,516 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:15,519 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T15:21:15,519 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-20T15:21:15,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T15:21:15,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741837_1013 (size=960) 2024-11-20T15:21:15,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T15:21:15,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T15:21:15,938 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3 2024-11-20T15:21:15,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741838_1014 (size=53) 2024-11-20T15:21:15,948 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:21:15,948 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing d2e9d0b7937c41ae63a82116ea698557, disabling compactions & flushes 2024-11-20T15:21:15,948 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:15,948 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:15,948 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. after waiting 0 ms 2024-11-20T15:21:15,948 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:15,948 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:15,948 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:15,950 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T15:21:15,950 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732116075950"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732116075950"}]},"ts":"1732116075950"} 2024-11-20T15:21:15,953 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
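The create request logged above defines TestAcidGuarantees with three column families A, B and C, VERSIONS => '1', 64 KB blocks, and the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. The following is a rough client-side sketch of the same DDL using the public Admin API; the exact builder chain is my approximation of what the test issues, not the test's own code, and it assumes a reachable cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder tdb = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setValue("hbase.hregion.compacting.memstore.type", "BASIC");  // same attribute as logged
      for (String family : new String[] { "A", "B", "C" }) {
        tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)        // VERSIONS => '1'
            .setBlocksize(64 * 1024)  // BLOCKSIZE => 64KB
            .build());
      }
      admin.createTable(tdb.build());  // drives the CreateTableProcedure seen in the log
    }
  }
}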
2024-11-20T15:21:15,954 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T15:21:15,955 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116075954"}]},"ts":"1732116075954"} 2024-11-20T15:21:15,957 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T15:21:15,961 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2e9d0b7937c41ae63a82116ea698557, ASSIGN}] 2024-11-20T15:21:15,963 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2e9d0b7937c41ae63a82116ea698557, ASSIGN 2024-11-20T15:21:15,964 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2e9d0b7937c41ae63a82116ea698557, ASSIGN; state=OFFLINE, location=0b62285ead89,33387,1732116069954; forceNewPlan=false, retain=false 2024-11-20T15:21:16,115 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=d2e9d0b7937c41ae63a82116ea698557, regionState=OPENING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:21:16,118 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:21:16,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T15:21:16,272 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:16,279 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
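The repeated "Checking to see if procedure is done pid=9" lines are the synchronous client polling the master for completion of its create-table procedure. The asynchronous variant exposes the same procedure as a Future; the helper below is a sketch of that pattern, assuming an open Admin handle and a descriptor built as in the previous sketch, and assuming the single-argument createTableAsync overload available in recent 2.x clients.

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class AsyncCreate {
  static void createAndWait(Admin admin, TableDescriptor td) throws Exception {
    Future<Void> f = admin.createTableAsync(td);  // master stores the procedure and returns its procId
    f.get(60, TimeUnit.SECONDS);                  // blocks until the procedure reports SUCCESS
  }
}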
2024-11-20T15:21:16,279 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} 2024-11-20T15:21:16,280 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:16,280 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:21:16,280 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:16,280 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:16,282 INFO [StoreOpener-d2e9d0b7937c41ae63a82116ea698557-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:16,286 INFO [StoreOpener-d2e9d0b7937c41ae63a82116ea698557-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:21:16,286 INFO [StoreOpener-d2e9d0b7937c41ae63a82116ea698557-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2e9d0b7937c41ae63a82116ea698557 columnFamilyName A 2024-11-20T15:21:16,286 DEBUG [StoreOpener-d2e9d0b7937c41ae63a82116ea698557-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:16,287 INFO [StoreOpener-d2e9d0b7937c41ae63a82116ea698557-1 {}] regionserver.HStore(327): Store=d2e9d0b7937c41ae63a82116ea698557/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:21:16,287 INFO [StoreOpener-d2e9d0b7937c41ae63a82116ea698557-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:16,289 INFO [StoreOpener-d2e9d0b7937c41ae63a82116ea698557-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:21:16,290 INFO [StoreOpener-d2e9d0b7937c41ae63a82116ea698557-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2e9d0b7937c41ae63a82116ea698557 columnFamilyName B 2024-11-20T15:21:16,290 DEBUG [StoreOpener-d2e9d0b7937c41ae63a82116ea698557-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:16,290 INFO [StoreOpener-d2e9d0b7937c41ae63a82116ea698557-1 {}] regionserver.HStore(327): Store=d2e9d0b7937c41ae63a82116ea698557/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:21:16,291 INFO [StoreOpener-d2e9d0b7937c41ae63a82116ea698557-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:16,292 INFO [StoreOpener-d2e9d0b7937c41ae63a82116ea698557-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:21:16,293 INFO [StoreOpener-d2e9d0b7937c41ae63a82116ea698557-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2e9d0b7937c41ae63a82116ea698557 columnFamilyName C 2024-11-20T15:21:16,293 DEBUG [StoreOpener-d2e9d0b7937c41ae63a82116ea698557-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:16,294 INFO [StoreOpener-d2e9d0b7937c41ae63a82116ea698557-1 {}] regionserver.HStore(327): Store=d2e9d0b7937c41ae63a82116ea698557/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:21:16,294 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:16,295 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:16,296 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:16,299 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T15:21:16,301 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:16,304 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T15:21:16,305 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened d2e9d0b7937c41ae63a82116ea698557; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62864660, jitterRate=-0.063243567943573}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T15:21:16,306 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:16,307 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., pid=11, masterSystemTime=1732116076271 2024-11-20T15:21:16,310 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:16,310 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
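Because of the 'hbase.hregion.compacting.memstore.type' => 'BASIC' attribute, the stores above come up with memstore type=CompactingMemStore and compactor=BASIC (in-memory compaction). The same behaviour can also be requested per column family through the descriptor API; the snippet below is a sketch of that alternative, with a placeholder family name, not the mechanism this test uses.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class InMemoryCompactionExample {
  public static void main(String[] args) {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)  // CompactingMemStore with BASIC compactor
        .build();
    System.out.println(cf.getInMemoryCompaction());
  }
}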
2024-11-20T15:21:16,311 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=d2e9d0b7937c41ae63a82116ea698557, regionState=OPEN, openSeqNum=2, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:21:16,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-20T15:21:16,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 in 196 msec 2024-11-20T15:21:16,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-20T15:21:16,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2e9d0b7937c41ae63a82116ea698557, ASSIGN in 357 msec 2024-11-20T15:21:16,322 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T15:21:16,323 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116076322"}]},"ts":"1732116076322"} 2024-11-20T15:21:16,325 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T15:21:16,328 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T15:21:16,331 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 818 msec 2024-11-20T15:21:16,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T15:21:16,641 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-20T15:21:16,645 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e98ea32 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b9fcedf 2024-11-20T15:21:16,649 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e71e468, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:16,651 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:16,654 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45782, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:16,657 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T15:21:16,658 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42518, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T15:21:16,665 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6c63ae4e to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22cb07dd 2024-11-20T15:21:16,669 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72b32f98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:16,671 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62c43377 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18cb251d 2024-11-20T15:21:16,674 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4977266, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:16,676 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4ee2166f to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5400112e 2024-11-20T15:21:16,679 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bbb5d8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:16,680 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a8f4734 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e52b42a 2024-11-20T15:21:16,685 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18603bb9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:16,687 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b5f27aa to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10c964e8 2024-11-20T15:21:16,692 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9ed28bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:16,695 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c3b736e to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@70267494 2024-11-20T15:21:16,698 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@490457fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:16,700 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x353bc462 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@767a8485 2024-11-20T15:21:16,704 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d2a8e08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:16,706 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6502d571 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c915d17 2024-11-20T15:21:16,710 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f6b07e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:16,712 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2a0471b9 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@cdbe80b 2024-11-20T15:21:16,716 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bb8b26c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:16,720 DEBUG [hconnection-0x426356f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:16,724 DEBUG [hconnection-0x45f0f0a5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:16,725 DEBUG [hconnection-0x6313e8f1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:16,726 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45786, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:16,726 DEBUG [hconnection-0x5f7a19bf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:16,726 DEBUG [hconnection-0x4b613b0d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:16,727 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:16,727 DEBUG [hconnection-0x4fd12293-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:16,728 DEBUG [hconnection-0x5dc12106-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:16,728 
DEBUG [hconnection-0x364c5753-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:16,728 DEBUG [hconnection-0x4d811003-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:16,730 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45792, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:16,732 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45806, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:16,732 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45812, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:16,732 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45810, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:16,732 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45828, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:16,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-20T15:21:16,733 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45842, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:16,735 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:16,735 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45852, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:16,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T15:21:16,736 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:16,738 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:16,757 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45862, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:16,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:21:16,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:16,836 DEBUG [MemStoreFlusher.0 
{}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:16,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:16,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:16,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:16,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:16,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:16,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T15:21:16,902 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:16,904 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T15:21:16,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:16,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:16,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:16,918 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
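For context, the flush recorded above starts with an Admin flush request (master log: Client=jenkins//172.17.0.2 flush TestAcidGuarantees), which the master turns into FlushTableProcedure pid=12 with a FlushRegionProcedure subprocedure pid=13; the "Unable to complete flush ... as already flushing" IOException simply means the region server's own MemStoreFlusher got there first, and the master re-dispatches the callable later. A minimal client-side sketch of such a flush request, assuming a standard HBase 2.x Java client and the ZooKeeper endpoint 127.0.0.1:62338 seen in the ReadOnlyZKClient lines (values are taken from this log and would differ on a real cluster):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ZooKeeper endpoint as logged by ReadOnlyZKClient above; adjust for a real deployment.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "62338");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; the master schedules a
          // FlushTableProcedure and fans out FlushRegionProcedure calls to the region servers.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }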
2024-11-20T15:21:16,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:16,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/e46ba40fb1c742e98af60d8b247e81b4 is 50, key is test_row_0/A:col10/1732116076807/Put/seqid=0 2024-11-20T15:21:16,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:16,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741839_1015 (size=9657) 2024-11-20T15:21:16,976 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/e46ba40fb1c742e98af60d8b247e81b4 2024-11-20T15:21:16,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:16,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116136980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116136987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116136995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116136997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116136995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T15:21:17,079 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/851eab813ca742919b77141b4264a81a is 50, key is test_row_0/B:col10/1732116076807/Put/seqid=0 2024-11-20T15:21:17,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741840_1016 (size=9657) 2024-11-20T15:21:17,101 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/851eab813ca742919b77141b4264a81a 2024-11-20T15:21:17,103 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,104 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T15:21:17,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:17,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:17,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
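The repeated RegionTooBusyException ("Over memstore limit=512.0 K") entries above are the region server back-pressuring writers while region d2e9d0b7937c41ae63a82116ea698557 is being flushed; the stock HBase client already retries these internally within its configured retry budget. A hand-rolled equivalent, shown only as an illustrative sketch with family and row names borrowed from this test (families A/B/C, rows test_row_N), would look like the following; a caller would invoke putWithRetry(conn, 5) with an open Connection:

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class BusyRegionRetrySketch {
      // Retries a single put with simple linear backoff when the region reports
      // that it is over its blocking memstore size.
      static void putWithRetry(Connection conn, int maxAttempts) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
              table.put(put);
              return;
            } catch (RegionTooBusyException e) {
              // Region is above its blocking memstore limit; wait for the flush to catch up.
              Thread.sleep(100L * attempt);
            }
          }
          throw new RuntimeException("put still rejected after " + maxAttempts + " attempts");
        }
      }
    }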
2024-11-20T15:21:17,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:17,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:17,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:17,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116137131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116137131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116137132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116137133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116137133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/f79a2aa455294e188d2f6e1911c9fd5e is 50, key is test_row_0/C:col10/1732116076807/Put/seqid=0 2024-11-20T15:21:17,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741841_1017 (size=9657) 2024-11-20T15:21:17,166 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/f79a2aa455294e188d2f6e1911c9fd5e 2024-11-20T15:21:17,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/e46ba40fb1c742e98af60d8b247e81b4 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e46ba40fb1c742e98af60d8b247e81b4 2024-11-20T15:21:17,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e46ba40fb1c742e98af60d8b247e81b4, entries=100, sequenceid=13, filesize=9.4 K 2024-11-20T15:21:17,207 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-20T15:21:17,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/851eab813ca742919b77141b4264a81a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/851eab813ca742919b77141b4264a81a 2024-11-20T15:21:17,224 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/851eab813ca742919b77141b4264a81a, entries=100, sequenceid=13, filesize=9.4 K 2024-11-20T15:21:17,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/f79a2aa455294e188d2f6e1911c9fd5e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f79a2aa455294e188d2f6e1911c9fd5e 2024-11-20T15:21:17,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f79a2aa455294e188d2f6e1911c9fd5e, entries=100, sequenceid=13, filesize=9.4 K 2024-11-20T15:21:17,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for d2e9d0b7937c41ae63a82116ea698557 in 419ms, sequenceid=13, compaction requested=false 2024-11-20T15:21:17,245 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T15:21:17,247 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:17,264 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T15:21:17,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
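The 512 KB figure quoted in the RegionTooBusyException messages above and below is the region's memstore flush size multiplied by the block multiplier; this test evidently runs with a deliberately tiny flush size so that flushes and write rejections happen quickly. A configuration sketch of that relationship follows; the concrete values are an assumption for illustration, only the flush-size times multiplier rule is standard HBase behavior:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB... (assumed test value)
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // ...and block writers (RegionTooBusyException) once it reaches 4x that, i.e. 512 KB,
        // matching the "Over memstore limit=512.0 K" messages in this log.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288
      }
    }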
2024-11-20T15:21:17,268 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T15:21:17,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:17,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:17,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:17,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:17,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:17,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:17,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/6a32833fe7804d049dd48aef49452324 is 50, key is test_row_0/A:col10/1732116076977/Put/seqid=0 2024-11-20T15:21:17,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741842_1018 (size=12001) 2024-11-20T15:21:17,317 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/6a32833fe7804d049dd48aef49452324 2024-11-20T15:21:17,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:17,345 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
as already flushing 2024-11-20T15:21:17,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T15:21:17,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/f747aeb9ddd547f2b1c10335c1a8beb0 is 50, key is test_row_0/B:col10/1732116076977/Put/seqid=0 2024-11-20T15:21:17,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116137361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116137361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116137361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116137366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116137368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741843_1019 (size=12001) 2024-11-20T15:21:17,387 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/f747aeb9ddd547f2b1c10335c1a8beb0 2024-11-20T15:21:17,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/aa99cf67c0c94b90b59c2db93a89525d is 50, key is test_row_0/C:col10/1732116076977/Put/seqid=0 2024-11-20T15:21:17,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741844_1020 (size=12001) 2024-11-20T15:21:17,432 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/aa99cf67c0c94b90b59c2db93a89525d 2024-11-20T15:21:17,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/6a32833fe7804d049dd48aef49452324 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6a32833fe7804d049dd48aef49452324 2024-11-20T15:21:17,464 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6a32833fe7804d049dd48aef49452324, entries=150, sequenceid=39, filesize=11.7 K 2024-11-20T15:21:17,467 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/f747aeb9ddd547f2b1c10335c1a8beb0 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/f747aeb9ddd547f2b1c10335c1a8beb0 2024-11-20T15:21:17,482 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/f747aeb9ddd547f2b1c10335c1a8beb0, entries=150, sequenceid=39, filesize=11.7 K 2024-11-20T15:21:17,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/aa99cf67c0c94b90b59c2db93a89525d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/aa99cf67c0c94b90b59c2db93a89525d 2024-11-20T15:21:17,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116137473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116137474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116137475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116137476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116137476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,503 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/aa99cf67c0c94b90b59c2db93a89525d, entries=150, sequenceid=39, filesize=11.7 K 2024-11-20T15:21:17,505 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for d2e9d0b7937c41ae63a82116ea698557 in 237ms, sequenceid=39, compaction requested=false 2024-11-20T15:21:17,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:17,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:17,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-20T15:21:17,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-20T15:21:17,519 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-20T15:21:17,519 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 770 msec 2024-11-20T15:21:17,525 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 792 msec 2024-11-20T15:21:17,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:17,705 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T15:21:17,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:17,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:17,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:17,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:17,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:17,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:17,729 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/931a3c4da70e4cde9b5d6184622d0145 is 50, key is test_row_0/A:col10/1732116077705/Put/seqid=0 2024-11-20T15:21:17,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741845_1021 (size=16681) 2024-11-20T15:21:17,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116137764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116137770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116137771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,778 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116137773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,779 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116137775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T15:21:17,852 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-20T15:21:17,855 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:17,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-20T15:21:17,860 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:17,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T15:21:17,861 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:17,861 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:17,883 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116137880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116137880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116137881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,886 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116137881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:17,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116137882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:17,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T15:21:18,015 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T15:21:18,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:18,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:18,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:18,016 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:18,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:18,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:18,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:18,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116138087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:18,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116138089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:18,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:18,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116138090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116138090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:18,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116138092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T15:21:18,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/931a3c4da70e4cde9b5d6184622d0145 2024-11-20T15:21:18,171 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,173 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T15:21:18,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:18,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:18,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:18,174 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:18,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:18,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:18,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/61f789a2095a41ac8f59f91140d458b3 is 50, key is test_row_0/B:col10/1732116077705/Put/seqid=0 2024-11-20T15:21:18,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741846_1022 (size=12001) 2024-11-20T15:21:18,327 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,329 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T15:21:18,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:18,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:18,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:18,330 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
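The records above repeat one cycle: the master dispatches the FlushRegionCallable procedure (pid=15), the region server finds the region "already flushing", the callable fails with "Unable to complete flush", and the master logs "Remote procedure failed" before dispatching it again. The sketch below is a simplified, hypothetical rendering of that retryable-failure pattern, not the actual FlushRegionCallable source; the class and flag names are invented for illustration.

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;

    // Hypothetical sketch of the pattern in the log: while a flush is already in
    // progress, the remote flush procedure fails with an IOException and the
    // coordinator re-dispatches it later.
    public class FlushRetrySketch {
      private static final AtomicBoolean flushing = new AtomicBoolean(true); // assume a flush is running

      static void remoteFlush(String encodedRegionName) throws IOException {
        if (flushing.get()) {
          // Corresponds to "NOT flushing ... as already flushing" followed by
          // "Unable to complete flush"; the failure is treated as retryable.
          throw new IOException("Unable to complete flush " + encodedRegionName);
        }
        // ... otherwise perform the flush and report success ...
      }

      public static void main(String[] args) {
        try {
          remoteFlush("d2e9d0b7937c41ae63a82116ea698557");
        } catch (IOException e) {
          System.out.println("procedure failed, master will retry: " + e.getMessage());
        }
      }
    }

The same cycle recurs below until the in-progress flush completes, at which point a later dispatch of pid=15 can run.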
2024-11-20T15:21:18,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:18,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:18,394 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:18,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116138393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:18,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116138398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:18,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116138400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:18,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116138400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:18,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116138403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T15:21:18,484 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,485 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T15:21:18,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:18,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:18,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:18,496 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
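The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") show client mutations being rejected while the region's memstore sits above its blocking threshold, which HBase derives from the configured flush size multiplied by the memstore block multiplier. The sketch below is only an illustration of how a 512 K limit could arise; the exact values used by this test are an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative only: one way a 512 K blocking limit could be configured.
    // blocking limit = hbase.hregion.memstore.flush.size
    //                  * hbase.hregion.memstore.block.multiplier
    public class MemstoreBlockingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 K, assumed test value
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // default multiplier
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println(blockingLimit / 1024.0 + " K"); // 512.0 K, matching the warnings above
      }
    }

Until a flush brings the memstore back under this limit, the RPC handlers keep returning RegionTooBusyException to the client, which is expected to retry.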
2024-11-20T15:21:18,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:18,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:18,518 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T15:21:18,519 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-20T15:21:18,613 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/61f789a2095a41ac8f59f91140d458b3 2024-11-20T15:21:18,634 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/227eff475d42493fac890e2cc6f789ca is 50, key is test_row_0/C:col10/1732116077705/Put/seqid=0 2024-11-20T15:21:18,652 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T15:21:18,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:18,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:18,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:18,654 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:18,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:18,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:18,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741847_1023 (size=12001) 2024-11-20T15:21:18,662 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/227eff475d42493fac890e2cc6f789ca 2024-11-20T15:21:18,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/931a3c4da70e4cde9b5d6184622d0145 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/931a3c4da70e4cde9b5d6184622d0145 2024-11-20T15:21:18,694 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/931a3c4da70e4cde9b5d6184622d0145, entries=250, sequenceid=53, filesize=16.3 K 2024-11-20T15:21:18,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/61f789a2095a41ac8f59f91140d458b3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/61f789a2095a41ac8f59f91140d458b3 2024-11-20T15:21:18,713 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/61f789a2095a41ac8f59f91140d458b3, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T15:21:18,715 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/227eff475d42493fac890e2cc6f789ca as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/227eff475d42493fac890e2cc6f789ca 2024-11-20T15:21:18,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/227eff475d42493fac890e2cc6f789ca, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T15:21:18,738 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for d2e9d0b7937c41ae63a82116ea698557 in 1032ms, sequenceid=53, compaction requested=true 2024-11-20T15:21:18,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:18,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:18,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:18,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:18,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:18,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:18,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:18,740 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:18,740 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:18,745 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:18,747 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/B is initiating minor compaction (all files) 2024-11-20T15:21:18,747 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/B in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
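With the flush finished at sequenceid=53, stores A, B and C each hold three HFiles, so "compaction requested=true" and a compact mark is added for every store. A minimal check of that file-count trigger is sketched below, assuming the default minimum of three files (hbase.hstore.compaction.min); it is a toy check, not HBase's actual selection code.

    // Toy illustration of the minor-compaction trigger: enough store files.
    public class CompactionTriggerSketch {
      static boolean needsCompaction(int storeFileCount, int compactionMin) {
        return storeFileCount >= compactionMin;
      }
      public static void main(String[] args) {
        // Each of A, B and C has 3 files after the flush; default minimum is 3.
        System.out.println(needsCompaction(3, 3)); // true -> "compaction requested=true"
      }
    }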
2024-11-20T15:21:18,747 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/851eab813ca742919b77141b4264a81a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/f747aeb9ddd547f2b1c10335c1a8beb0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/61f789a2095a41ac8f59f91140d458b3] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=32.9 K 2024-11-20T15:21:18,749 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 851eab813ca742919b77141b4264a81a, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732116076807 2024-11-20T15:21:18,750 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting f747aeb9ddd547f2b1c10335c1a8beb0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732116076977 2024-11-20T15:21:18,750 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38339 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:18,751 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 61f789a2095a41ac8f59f91140d458b3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732116077341 2024-11-20T15:21:18,751 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/A is initiating minor compaction (all files) 2024-11-20T15:21:18,751 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/A in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:18,751 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e46ba40fb1c742e98af60d8b247e81b4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6a32833fe7804d049dd48aef49452324, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/931a3c4da70e4cde9b5d6184622d0145] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=37.4 K 2024-11-20T15:21:18,755 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting e46ba40fb1c742e98af60d8b247e81b4, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732116076807 2024-11-20T15:21:18,756 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a32833fe7804d049dd48aef49452324, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732116076977 2024-11-20T15:21:18,757 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 931a3c4da70e4cde9b5d6184622d0145, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732116077341 2024-11-20T15:21:18,785 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#B#compaction#9 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:18,786 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/9c48194c21d946f191c8de3e4b750692 is 50, key is test_row_0/B:col10/1732116077705/Put/seqid=0 2024-11-20T15:21:18,808 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T15:21:18,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
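The "Exploring compaction algorithm has selected 3 files of size 38339 ... with 1 in ratio" entries reflect the ratio test applied to candidate selections: every file must be no larger than the ratio times the combined size of the other candidates. The sketch below is a toy version of that test using the A-store total from the log; the individual byte counts are approximations and the default ratio of 1.2 is assumed, and it is not the actual ExploringCompactionPolicy code.

    // Toy version of the "in ratio" test over a candidate selection.
    public class RatioCheckSketch {
      static boolean inRatio(long[] sizes, double ratio) {
        long total = 0;
        for (long s : sizes) total += s;
        for (long s : sizes) {
          if (s > ratio * (total - s)) return false; // one oversized file breaks the ratio
        }
        return true;
      }

      public static void main(String[] args) {
        // Approximate sizes for the A-store candidates (9.4 K, 11.7 K, 16.3 K; total 38339 bytes).
        long[] sizes = {9622, 12001, 16716};
        System.out.println(inRatio(sizes, 1.2)); // true, so all 3 files are selected
      }
    }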
2024-11-20T15:21:18,809 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T15:21:18,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:18,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:18,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:18,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:18,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:18,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:18,820 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#A#compaction#10 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:18,821 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/734bf2e426c34e269af3352afd063fa1 is 50, key is test_row_0/A:col10/1732116077705/Put/seqid=0 2024-11-20T15:21:18,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/1c5b0266c5bf4a5b8df7cc366c63b1e2 is 50, key is test_row_0/A:col10/1732116077765/Put/seqid=0 2024-11-20T15:21:18,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741848_1024 (size=12104) 2024-11-20T15:21:18,839 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/9c48194c21d946f191c8de3e4b750692 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/9c48194c21d946f191c8de3e4b750692 2024-11-20T15:21:18,862 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/B of d2e9d0b7937c41ae63a82116ea698557 into 9c48194c21d946f191c8de3e4b750692(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
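The "Committing ... .tmp/B/<file> as ... /B/<file>" entries show the usual two-step commit for new store files: the flush or compaction writes the HFile under the region's .tmp directory, then moves it into the column-family directory with a single rename. A minimal sketch of that move using the Hadoop FileSystem API follows; the paths are shortened placeholders, not the full paths from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Minimal sketch of the write-to-.tmp-then-commit step; placeholder paths.
    public class CommitStoreFileSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp = new Path("/data/default/TestAcidGuarantees/region/.tmp/B/hfile");
        Path dst = new Path("/data/default/TestAcidGuarantees/region/B/hfile");
        if (!fs.rename(tmp, dst)) { // single rename within the same filesystem
          throw new java.io.IOException("Failed to commit " + tmp + " to " + dst);
        }
      }
    }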
2024-11-20T15:21:18,862 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:18,863 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/B, priority=13, startTime=1732116078740; duration=0sec 2024-11-20T15:21:18,863 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:18,863 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:B 2024-11-20T15:21:18,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741849_1025 (size=12104) 2024-11-20T15:21:18,871 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:18,878 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/734bf2e426c34e269af3352afd063fa1 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/734bf2e426c34e269af3352afd063fa1 2024-11-20T15:21:18,880 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:18,880 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/C is initiating minor compaction (all files) 2024-11-20T15:21:18,880 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/C in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:18,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741850_1026 (size=12001) 2024-11-20T15:21:18,888 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f79a2aa455294e188d2f6e1911c9fd5e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/aa99cf67c0c94b90b59c2db93a89525d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/227eff475d42493fac890e2cc6f789ca] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=32.9 K 2024-11-20T15:21:18,888 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting f79a2aa455294e188d2f6e1911c9fd5e, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732116076807 2024-11-20T15:21:18,890 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting aa99cf67c0c94b90b59c2db93a89525d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732116076977 2024-11-20T15:21:18,892 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 227eff475d42493fac890e2cc6f789ca, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732116077341 2024-11-20T15:21:18,893 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/A of d2e9d0b7937c41ae63a82116ea698557 into 734bf2e426c34e269af3352afd063fa1(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:18,893 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:18,893 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/A, priority=13, startTime=1732116078739; duration=0sec 2024-11-20T15:21:18,893 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:18,893 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:A 2024-11-20T15:21:18,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
as already flushing 2024-11-20T15:21:18,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:18,917 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#C#compaction#12 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:18,918 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/5997f3f1083a4388ab8b9d137f3175a5 is 50, key is test_row_0/C:col10/1732116077705/Put/seqid=0 2024-11-20T15:21:18,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:18,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116138933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:18,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116138935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:18,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116138939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741851_1027 (size=12104) 2024-11-20T15:21:18,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116138942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:18,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116138943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:18,966 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/5997f3f1083a4388ab8b9d137f3175a5 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/5997f3f1083a4388ab8b9d137f3175a5 2024-11-20T15:21:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T15:21:18,980 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/C of d2e9d0b7937c41ae63a82116ea698557 into 5997f3f1083a4388ab8b9d137f3175a5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:18,981 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:18,981 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/C, priority=13, startTime=1732116078740; duration=0sec 2024-11-20T15:21:18,981 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:18,981 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:C 2024-11-20T15:21:19,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116139045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116139047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116139046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116139054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116139054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116139255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116139255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,259 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116139258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116139258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116139259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,286 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/1c5b0266c5bf4a5b8df7cc366c63b1e2 2024-11-20T15:21:19,310 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T15:21:19,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/b41877d0403841478db5990fff8b3766 is 50, key is test_row_0/B:col10/1732116077765/Put/seqid=0 2024-11-20T15:21:19,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741852_1028 (size=12001) 2024-11-20T15:21:19,364 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/b41877d0403841478db5990fff8b3766 2024-11-20T15:21:19,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/96bcc62f36e54d32b94a0fee08b51cac is 50, key is test_row_0/C:col10/1732116077765/Put/seqid=0 2024-11-20T15:21:19,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741853_1029 (size=12001) 2024-11-20T15:21:19,412 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), 
to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/96bcc62f36e54d32b94a0fee08b51cac 2024-11-20T15:21:19,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/1c5b0266c5bf4a5b8df7cc366c63b1e2 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/1c5b0266c5bf4a5b8df7cc366c63b1e2 2024-11-20T15:21:19,448 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/1c5b0266c5bf4a5b8df7cc366c63b1e2, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T15:21:19,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/b41877d0403841478db5990fff8b3766 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b41877d0403841478db5990fff8b3766 2024-11-20T15:21:19,467 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b41877d0403841478db5990fff8b3766, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T15:21:19,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/96bcc62f36e54d32b94a0fee08b51cac as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/96bcc62f36e54d32b94a0fee08b51cac 2024-11-20T15:21:19,484 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/96bcc62f36e54d32b94a0fee08b51cac, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T15:21:19,486 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for d2e9d0b7937c41ae63a82116ea698557 in 676ms, sequenceid=76, compaction requested=false 2024-11-20T15:21:19,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 
2024-11-20T15:21:19,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:19,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-20T15:21:19,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-20T15:21:19,494 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-20T15:21:19,494 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6280 sec 2024-11-20T15:21:19,498 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.6400 sec 2024-11-20T15:21:19,568 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T15:21:19,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:19,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:19,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:19,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:19,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:19,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:19,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:19,592 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/5176bd14faaf4bd2aeb11d12b7eb1699 is 50, key is test_row_0/A:col10/1732116079568/Put/seqid=0 2024-11-20T15:21:19,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116139597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116139599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116139600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116139602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116139604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741854_1030 (size=14341) 2024-11-20T15:21:19,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116139710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116139711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116139711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116139712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116139713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,723 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T15:21:19,723 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T15:21:19,725 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-20T15:21:19,725 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-20T15:21:19,726 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T15:21:19,727 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T15:21:19,727 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T15:21:19,727 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-20T15:21:19,729 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the 
MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T15:21:19,729 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T15:21:19,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116139914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116139914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116139917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116139917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:19,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116139918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:19,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T15:21:19,969 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-20T15:21:19,973 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:19,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-20T15:21:19,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T15:21:19,977 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:19,978 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:19,978 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:20,024 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/5176bd14faaf4bd2aeb11d12b7eb1699 2024-11-20T15:21:20,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/477502956fda4ac2bdb89a2e65d85fe7 is 50, key is test_row_0/B:col10/1732116079568/Put/seqid=0 2024-11-20T15:21:20,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741855_1031 (size=12001) 
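The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") mean the region's memstore has grown past its blocking limit, so incoming mutations are rejected until a flush catches up. That blocking limit is the per-region flush size multiplied by the block multiplier. A minimal sketch of the two settings involved, with illustrative values only (this test run clearly uses a much smaller flush size than shown here):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: the "Over memstore limit" value logged above is the per-region
// blocking limit, i.e. memstore flush size * block multiplier. The numbers
// below are illustrative, not the ones this test was started with.
public class MemstoreBlockingLimitSketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // Memstore size at which a region flush is triggered.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes get RegionTooBusyException once the memstore reaches
    // flush.size * multiplier, until flushing brings it back down.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}
```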
2024-11-20T15:21:20,069 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/477502956fda4ac2bdb89a2e65d85fe7 2024-11-20T15:21:20,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T15:21:20,092 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/568995664b254ecf8deb717274403d55 is 50, key is test_row_0/C:col10/1732116079568/Put/seqid=0 2024-11-20T15:21:20,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741856_1032 (size=12001) 2024-11-20T15:21:20,115 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/568995664b254ecf8deb717274403d55 2024-11-20T15:21:20,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/5176bd14faaf4bd2aeb11d12b7eb1699 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/5176bd14faaf4bd2aeb11d12b7eb1699 2024-11-20T15:21:20,131 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:20,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T15:21:20,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:20,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:20,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:20,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:20,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:20,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:20,147 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/5176bd14faaf4bd2aeb11d12b7eb1699, entries=200, sequenceid=97, filesize=14.0 K 2024-11-20T15:21:20,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/477502956fda4ac2bdb89a2e65d85fe7 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/477502956fda4ac2bdb89a2e65d85fe7 2024-11-20T15:21:20,164 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/477502956fda4ac2bdb89a2e65d85fe7, entries=150, sequenceid=97, filesize=11.7 K 2024-11-20T15:21:20,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/568995664b254ecf8deb717274403d55 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/568995664b254ecf8deb717274403d55 2024-11-20T15:21:20,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/568995664b254ecf8deb717274403d55, entries=150, sequenceid=97, filesize=11.7 K 
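For context, the FLUSH operations driving the FlushTableProcedure entries above (pid=14 completed, pid=16 stored) are ordinary administrative flush requests against the TestAcidGuarantees table. A minimal client-side sketch of issuing such a request; only the table name is taken from the log, the rest is assumed boilerplate:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of the client side of the flush requests seen above; the
// master turns each request into a FlushTableProcedure and fans it out to the
// region server(s) hosting the table's regions.
public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees")); // table name from the log
    }
  }
}
```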
2024-11-20T15:21:20,186 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for d2e9d0b7937c41ae63a82116ea698557 in 618ms, sequenceid=97, compaction requested=true 2024-11-20T15:21:20,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:20,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:20,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:20,187 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:20,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:20,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:20,187 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:20,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:20,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:20,189 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:20,189 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/A is initiating minor compaction (all files) 2024-11-20T15:21:20,189 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/A in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
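The compaction activity above kicks in because each store (A, B, C) has accumulated three HFiles; "3 eligible, 16 blocking" reflects the minimum-file threshold for a minor compaction and the store-file count at which further flushes are delayed. A minimal sketch of the knobs involved; the values shown are the usual defaults and are not read from this run's configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the settings behind "3 eligible, 16 blocking" in the compaction
// selection logged above. Values are the common defaults, shown only to name
// the knobs; they are not taken from this test's configuration.
public class CompactionThresholdSketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);      // files needed before a minor compaction is selected
    conf.setInt("hbase.hstore.compaction.max", 10);     // upper bound on files merged in one compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16); // at this many files, flushes are delayed
    return conf;
  }
}
```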
2024-11-20T15:21:20,190 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/734bf2e426c34e269af3352afd063fa1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/1c5b0266c5bf4a5b8df7cc366c63b1e2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/5176bd14faaf4bd2aeb11d12b7eb1699] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=37.5 K 2024-11-20T15:21:20,190 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:20,191 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 734bf2e426c34e269af3352afd063fa1, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732116077341 2024-11-20T15:21:20,191 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/B is initiating minor compaction (all files) 2024-11-20T15:21:20,191 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/B in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:20,191 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/9c48194c21d946f191c8de3e4b750692, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b41877d0403841478db5990fff8b3766, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/477502956fda4ac2bdb89a2e65d85fe7] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=35.3 K 2024-11-20T15:21:20,192 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c5b0266c5bf4a5b8df7cc366c63b1e2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732116077765 2024-11-20T15:21:20,192 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c48194c21d946f191c8de3e4b750692, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732116077341 2024-11-20T15:21:20,193 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b41877d0403841478db5990fff8b3766, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732116077765 2024-11-20T15:21:20,193 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5176bd14faaf4bd2aeb11d12b7eb1699, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732116078939 2024-11-20T15:21:20,194 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 477502956fda4ac2bdb89a2e65d85fe7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732116078939 2024-11-20T15:21:20,214 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#B#compaction#18 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:20,216 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/1294b61bf867408cb9cf3c05e82ffac6 is 50, key is test_row_0/B:col10/1732116079568/Put/seqid=0 2024-11-20T15:21:20,216 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#A#compaction#19 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:20,217 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/a2365916160a413784df542a5b4bbac4 is 50, key is test_row_0/A:col10/1732116079568/Put/seqid=0 2024-11-20T15:21:20,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:20,225 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T15:21:20,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:20,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:20,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:20,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:20,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:20,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:20,251 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/d2970cbed588406fb3d6891a1a2e5ccd is 50, key is test_row_0/A:col10/1732116079601/Put/seqid=0 2024-11-20T15:21:20,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:20,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116140248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:20,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741857_1033 (size=12207) 2024-11-20T15:21:20,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:20,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116140255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:20,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:20,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116140255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:20,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:20,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116140258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:20,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:20,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116140258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:20,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741859_1035 (size=12001) 2024-11-20T15:21:20,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T15:21:20,280 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/d2970cbed588406fb3d6891a1a2e5ccd 2024-11-20T15:21:20,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741858_1034 (size=12207) 2024-11-20T15:21:20,286 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:20,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T15:21:20,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:20,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
as already flushing 2024-11-20T15:21:20,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:20,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:20,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:20,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:20,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/8b757b05814c4946a1239dc5d0bdcfce is 50, key is test_row_0/B:col10/1732116079601/Put/seqid=0 2024-11-20T15:21:20,305 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/a2365916160a413784df542a5b4bbac4 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/a2365916160a413784df542a5b4bbac4 2024-11-20T15:21:20,318 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/A of d2e9d0b7937c41ae63a82116ea698557 into a2365916160a413784df542a5b4bbac4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
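The RegionTooBusyException warnings before and after this point are thrown back to the writer while the memstore stays over its blocking limit. The HBase client normally retries such failures itself, so whether the exception reaches application code directly or wrapped in a retries-exhausted error depends on client retry settings; the sketch below assumes it surfaces directly and simply backs off before trying again. Apart from the table name, all names are placeholders:

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

// Sketch: back off and retry a put that fails with RegionTooBusyException
// (assumes the exception surfaces to the caller; with default client retry
// settings it may instead arrive wrapped in a retries-exhausted exception).
public class BusyRegionBackoffSketch {
  static void putWithBackoff(Connection conn, Put put) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long pauseMs = 100;
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException busy) {
          if (attempt >= 5) throw busy; // give up after a few attempts
          Thread.sleep(pauseMs);        // give the flush time to catch up
          pauseMs = Math.min(pauseMs * 2, 2_000);
        }
      }
    }
  }
}
```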
2024-11-20T15:21:20,318 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:20,318 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/A, priority=13, startTime=1732116080186; duration=0sec 2024-11-20T15:21:20,319 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:20,319 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:A 2024-11-20T15:21:20,319 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:20,321 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:20,321 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/C is initiating minor compaction (all files) 2024-11-20T15:21:20,321 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/C in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:20,322 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/5997f3f1083a4388ab8b9d137f3175a5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/96bcc62f36e54d32b94a0fee08b51cac, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/568995664b254ecf8deb717274403d55] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=35.3 K 2024-11-20T15:21:20,322 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5997f3f1083a4388ab8b9d137f3175a5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732116077341 2024-11-20T15:21:20,323 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96bcc62f36e54d32b94a0fee08b51cac, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732116077765 2024-11-20T15:21:20,324 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 568995664b254ecf8deb717274403d55, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732116078939 2024-11-20T15:21:20,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:35761 is added to blk_1073741860_1036 (size=12001) 2024-11-20T15:21:20,359 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#C#compaction#22 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:20,360 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/0082f12332384355a0cdc169d3d3a236 is 50, key is test_row_0/C:col10/1732116079568/Put/seqid=0 2024-11-20T15:21:20,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:20,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116140363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:20,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:20,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116140363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:20,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:20,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741861_1037 (size=12207) 2024-11-20T15:21:20,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116140363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:20,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T15:21:20,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116140365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
2024-11-20T15:21:20,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T15:21:20,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116140365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
2024-11-20T15:21:20,442 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954
2024-11-20T15:21:20,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17
2024-11-20T15:21:20,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.
2024-11-20T15:21:20,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing
2024-11-20T15:21:20,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.
2024-11-20T15:21:20,443 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17
java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
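The RegionTooBusyException entries above are raised by HRegion.checkResources(), which rejects Mutate calls while the region's memstore is over its blocking threshold; that threshold is the memstore flush size multiplied by the block multiplier, and the 512.0 K limit in this log suggests the test runs with a deliberately small flush size. A minimal configuration sketch of the two properties involved follows; the 128 KB value is only an assumption for illustration, since the exact settings used by TestAcidGuarantees are not shown in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches this many bytes
    // (128 KB is an assumed, test-sized value; the production default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // Reject writes with RegionTooBusyException once the memstore exceeds
    // flush.size * block.multiplier, i.e. 4 * 128 KB = 512 KB -- the
    // "Over memstore limit=512.0 K" threshold reported above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
  }
}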
2024-11-20T15:21:20,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17
java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T15:21:20,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=17
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T15:21:20,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T15:21:20,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116140570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
2024-11-20T15:21:20,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T15:21:20,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116140573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
2024-11-20T15:21:20,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T15:21:20,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116140573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
2024-11-20T15:21:20,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T15:21:20,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116140574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
2024-11-20T15:21:20,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T15:21:20,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116140575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
2024-11-20T15:21:20,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16
2024-11-20T15:21:20,596 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954
2024-11-20T15:21:20,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17
2024-11-20T15:21:20,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.
2024-11-20T15:21:20,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing
2024-11-20T15:21:20,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.
2024-11-20T15:21:20,598 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17
java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
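Each rejected Mutate above reaches the client as a RegionTooBusyException. The stock HBase client retries such calls internally (and may surface a final failure wrapped in a retries-exhausted exception), so the following is only a rough, hypothetical sketch of explicit backoff-and-retry around a single Put, using the table and row names from this test; the retry count and sleep times are illustrative assumptions.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);            // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e; // give up after a few attempts
          Thread.sleep(backoffMs);   // back off so the in-flight flush can drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}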
2024-11-20T15:21:20,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17
java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T15:21:20,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=17
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T15:21:20,672 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/1294b61bf867408cb9cf3c05e82ffac6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/1294b61bf867408cb9cf3c05e82ffac6
2024-11-20T15:21:20,685 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/B of d2e9d0b7937c41ae63a82116ea698557 into 1294b61bf867408cb9cf3c05e82ffac6(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T15:21:20,685 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557:
2024-11-20T15:21:20,685 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/B, priority=13, startTime=1732116080187; duration=0sec
2024-11-20T15:21:20,685 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T15:21:20,685 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:B
2024-11-20T15:21:20,749 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/8b757b05814c4946a1239dc5d0bdcfce
2024-11-20T15:21:20,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/879215701c0444318d698bc57fffa516 is 50, key is test_row_0/C:col10/1732116079601/Put/seqid=0
2024-11-20T15:21:20,774 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954
2024-11-20T15:21:20,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17
2024-11-20T15:21:20,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.
2024-11-20T15:21:20,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing
2024-11-20T15:21:20,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.
2024-11-20T15:21:20,776 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17
java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T15:21:20,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17
java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T15:21:20,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=17
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T15:21:20,785 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/0082f12332384355a0cdc169d3d3a236 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0082f12332384355a0cdc169d3d3a236
2024-11-20T15:21:20,799 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/C of d2e9d0b7937c41ae63a82116ea698557 into 0082f12332384355a0cdc169d3d3a236(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T15:21:20,799 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557:
2024-11-20T15:21:20,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741862_1038 (size=12001)
2024-11-20T15:21:20,799 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/C, priority=13, startTime=1732116080187; duration=0sec
2024-11-20T15:21:20,801 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T15:21:20,801 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:C
2024-11-20T15:21:20,802 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/879215701c0444318d698bc57fffa516
2024-11-20T15:21:20,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/d2970cbed588406fb3d6891a1a2e5ccd as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d2970cbed588406fb3d6891a1a2e5ccd
2024-11-20T15:21:20,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d2970cbed588406fb3d6891a1a2e5ccd, entries=150, sequenceid=116, filesize=11.7 K
2024-11-20T15:21:20,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/8b757b05814c4946a1239dc5d0bdcfce as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8b757b05814c4946a1239dc5d0bdcfce
2024-11-20T15:21:20,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8b757b05814c4946a1239dc5d0bdcfce, entries=150, sequenceid=116, filesize=11.7 K
2024-11-20T15:21:20,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/879215701c0444318d698bc57fffa516 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/879215701c0444318d698bc57fffa516
2024-11-20T15:21:20,870 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/879215701c0444318d698bc57fffa516, entries=150, sequenceid=116, filesize=11.7 K
2024-11-20T15:21:20,873 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for d2e9d0b7937c41ae63a82116ea698557 in 648ms, sequenceid=116, compaction requested=false
2024-11-20T15:21:20,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557:
2024-11-20T15:21:20,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557
2024-11-20T15:21:20,881 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB
2024-11-20T15:21:20,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A
2024-11-20T15:21:20,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:21:20,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B
2024-11-20T15:21:20,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:21:20,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C
2024-11-20T15:21:20,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:21:20,894 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/3811f16a80634f0c8b80cac89cf2a851 is 50, key is test_row_0/A:col10/1732116080254/Put/seqid=0
2024-11-20T15:21:20,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T15:21:20,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116140905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
2024-11-20T15:21:20,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T15:21:20,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116140907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
2024-11-20T15:21:20,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T15:21:20,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116140910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
2024-11-20T15:21:20,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741863_1039 (size=12101)
2024-11-20T15:21:20,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/3811f16a80634f0c8b80cac89cf2a851
2024-11-20T15:21:20,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T15:21:20,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116140914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
2024-11-20T15:21:20,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T15:21:20,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116140918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
2024-11-20T15:21:20,931 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954
2024-11-20T15:21:20,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17
2024-11-20T15:21:20,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.
2024-11-20T15:21:20,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing
2024-11-20T15:21:20,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.
2024-11-20T15:21:20,933 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17
java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T15:21:20,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17
java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T15:21:20,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=17
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
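The pid=17 failures keep repeating because the master re-dispatches FlushRegionCallable while the region server answers "NOT flushing ... as already flushing"; once the in-progress flush finishes, a later attempt can succeed. For reference, a hedged sketch of requesting such a table flush from a client is shown below; whether it is routed through a master-side procedure like pid=17 depends on the HBase version.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask for a flush of every region of the table; a region that is already flushing
      // (the "NOT flushing ... as already flushing" case above) is handled on the server
      // side by retrying once the ongoing flush completes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}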
2024-11-20T15:21:20,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/63563e7f3e8a41ab91926d8e43d84416 is 50, key is test_row_0/B:col10/1732116080254/Put/seqid=0
2024-11-20T15:21:20,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741864_1040 (size=12101)
2024-11-20T15:21:20,972 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/63563e7f3e8a41ab91926d8e43d84416
2024-11-20T15:21:20,987 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/83d6e6d49c514c34afe6a5c96f9b4a23 is 50, key is test_row_0/C:col10/1732116080254/Put/seqid=0
2024-11-20T15:21:21,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741865_1041 (size=12101)
2024-11-20T15:21:21,017 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/83d6e6d49c514c34afe6a5c96f9b4a23
2024-11-20T15:21:21,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116141017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116141018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116141020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116141022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116141025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/3811f16a80634f0c8b80cac89cf2a851 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/3811f16a80634f0c8b80cac89cf2a851 2024-11-20T15:21:21,047 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/3811f16a80634f0c8b80cac89cf2a851, entries=150, sequenceid=137, filesize=11.8 K 2024-11-20T15:21:21,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/63563e7f3e8a41ab91926d8e43d84416 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/63563e7f3e8a41ab91926d8e43d84416 2024-11-20T15:21:21,061 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/63563e7f3e8a41ab91926d8e43d84416, entries=150, sequenceid=137, filesize=11.8 K 2024-11-20T15:21:21,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/83d6e6d49c514c34afe6a5c96f9b4a23 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/83d6e6d49c514c34afe6a5c96f9b4a23 2024-11-20T15:21:21,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/83d6e6d49c514c34afe6a5c96f9b4a23, entries=150, sequenceid=137, filesize=11.8 K 2024-11-20T15:21:21,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for d2e9d0b7937c41ae63a82116ea698557 in 195ms, sequenceid=137, compaction requested=true 2024-11-20T15:21:21,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:21,077 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:21,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:21,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:21,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:21,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:21,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:21,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:21,077 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:21,080 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:21,081 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/A is initiating minor compaction (all files) 2024-11-20T15:21:21,081 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/A in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:21,081 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/a2365916160a413784df542a5b4bbac4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d2970cbed588406fb3d6891a1a2e5ccd, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/3811f16a80634f0c8b80cac89cf2a851] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=35.5 K 2024-11-20T15:21:21,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T15:21:21,082 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:21,082 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/B is initiating minor compaction (all files) 2024-11-20T15:21:21,082 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/B in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:21,083 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/1294b61bf867408cb9cf3c05e82ffac6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8b757b05814c4946a1239dc5d0bdcfce, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/63563e7f3e8a41ab91926d8e43d84416] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=35.5 K 2024-11-20T15:21:21,083 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2365916160a413784df542a5b4bbac4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732116078939 2024-11-20T15:21:21,084 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 1294b61bf867408cb9cf3c05e82ffac6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732116078939 2024-11-20T15:21:21,084 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2970cbed588406fb3d6891a1a2e5ccd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732116079591 2024-11-20T15:21:21,085 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b757b05814c4946a1239dc5d0bdcfce, keycount=150, bloomtype=ROW, size=11.7 
K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732116079591 2024-11-20T15:21:21,085 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3811f16a80634f0c8b80cac89cf2a851, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732116080227 2024-11-20T15:21:21,087 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,087 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 63563e7f3e8a41ab91926d8e43d84416, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732116080227 2024-11-20T15:21:21,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T15:21:21,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:21,088 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T15:21:21,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:21,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:21,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:21,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:21,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:21,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:21,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/631c3a945bc649089ecb47e740cf19f2 is 50, key is test_row_0/A:col10/1732116080904/Put/seqid=0 2024-11-20T15:21:21,123 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#B#compaction#28 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:21,124 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/6912c869a64841409b6e19c926fd8480 is 50, key is test_row_0/B:col10/1732116080254/Put/seqid=0 2024-11-20T15:21:21,135 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#A#compaction#29 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:21,136 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/031556377f1f411b9cb3ed01b88cb304 is 50, key is test_row_0/A:col10/1732116080254/Put/seqid=0 2024-11-20T15:21:21,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741866_1042 (size=12151) 2024-11-20T15:21:21,163 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/631c3a945bc649089ecb47e740cf19f2 2024-11-20T15:21:21,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/72ec3d935be545b89b4d7aa1371f6e39 is 50, key is test_row_0/B:col10/1732116080904/Put/seqid=0 2024-11-20T15:21:21,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741867_1043 (size=12409) 2024-11-20T15:21:21,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741868_1044 (size=12409) 2024-11-20T15:21:21,195 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/6912c869a64841409b6e19c926fd8480 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/6912c869a64841409b6e19c926fd8480 2024-11-20T15:21:21,205 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/B of d2e9d0b7937c41ae63a82116ea698557 into 6912c869a64841409b6e19c926fd8480(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:21:21,205 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557:
2024-11-20T15:21:21,205 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/B, priority=13, startTime=1732116081077; duration=0sec
2024-11-20T15:21:21,206 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T15:21:21,206 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:B
2024-11-20T15:21:21,206 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T15:21:21,207 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/031556377f1f411b9cb3ed01b88cb304 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/031556377f1f411b9cb3ed01b88cb304
2024-11-20T15:21:21,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741869_1045 (size=12151)
2024-11-20T15:21:21,209 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T15:21:21,209 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/C is initiating minor compaction (all files)
2024-11-20T15:21:21,209 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/C in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.
2024-11-20T15:21:21,210 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0082f12332384355a0cdc169d3d3a236, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/879215701c0444318d698bc57fffa516, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/83d6e6d49c514c34afe6a5c96f9b4a23] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=35.5 K 2024-11-20T15:21:21,212 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 0082f12332384355a0cdc169d3d3a236, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732116078939 2024-11-20T15:21:21,213 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 879215701c0444318d698bc57fffa516, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732116079591 2024-11-20T15:21:21,215 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 83d6e6d49c514c34afe6a5c96f9b4a23, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732116080227 2024-11-20T15:21:21,216 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/A of d2e9d0b7937c41ae63a82116ea698557 into 031556377f1f411b9cb3ed01b88cb304(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:21,216 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:21,216 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/A, priority=13, startTime=1732116081076; duration=0sec 2024-11-20T15:21:21,216 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:21,217 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:A 2024-11-20T15:21:21,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:21,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:21,233 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#C#compaction#31 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:21,234 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/1d5f8de737f24dc1944028b8b44ffd55 is 50, key is test_row_0/C:col10/1732116080254/Put/seqid=0 2024-11-20T15:21:21,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116141241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116141242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116141244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116141246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116141246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741870_1046 (size=12409) 2024-11-20T15:21:21,272 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/1d5f8de737f24dc1944028b8b44ffd55 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/1d5f8de737f24dc1944028b8b44ffd55 2024-11-20T15:21:21,284 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/C of d2e9d0b7937c41ae63a82116ea698557 into 1d5f8de737f24dc1944028b8b44ffd55(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:21,284 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:21,284 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/C, priority=13, startTime=1732116081077; duration=0sec 2024-11-20T15:21:21,284 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:21,284 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:C 2024-11-20T15:21:21,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116141349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116141349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116141350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116141350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116141352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116141553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116141554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116141555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116141556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116141559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,609 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/72ec3d935be545b89b4d7aa1371f6e39 2024-11-20T15:21:21,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/d18957eac61a4602bdf23dbd45035b46 is 50, key is test_row_0/C:col10/1732116080904/Put/seqid=0 2024-11-20T15:21:21,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741871_1047 (size=12151) 2024-11-20T15:21:21,644 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/d18957eac61a4602bdf23dbd45035b46 2024-11-20T15:21:21,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/631c3a945bc649089ecb47e740cf19f2 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/631c3a945bc649089ecb47e740cf19f2 2024-11-20T15:21:21,666 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/631c3a945bc649089ecb47e740cf19f2, entries=150, sequenceid=156, filesize=11.9 K 2024-11-20T15:21:21,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/72ec3d935be545b89b4d7aa1371f6e39 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/72ec3d935be545b89b4d7aa1371f6e39 2024-11-20T15:21:21,677 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/72ec3d935be545b89b4d7aa1371f6e39, entries=150, sequenceid=156, filesize=11.9 K 2024-11-20T15:21:21,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/d18957eac61a4602bdf23dbd45035b46 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d18957eac61a4602bdf23dbd45035b46 2024-11-20T15:21:21,688 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d18957eac61a4602bdf23dbd45035b46, entries=150, sequenceid=156, filesize=11.9 K 2024-11-20T15:21:21,690 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for d2e9d0b7937c41ae63a82116ea698557 in 602ms, sequenceid=156, compaction requested=false 2024-11-20T15:21:21,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:21,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
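The burst of RegionTooBusyException warnings above is HRegion.checkResources rejecting puts while region d2e9d0b7937c41ae63a82116ea698557 sits over its 512.0 K memstore blocking limit; the flush that completes here (dataSize ~107.34 KB, sequenceid=156) is what drains it. The blocking limit is the product of the per-region flush size and the block multiplier. A minimal sketch of those two settings, with hypothetical values chosen only so their product matches the 512 K limit reported in the log (the test's real configuration is not shown here):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: the two settings whose product is the per-region memstore
// blocking limit reported as "Over memstore limit=512.0 K" in the log above.
public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // A flush is requested once a region's memstore reaches this size (bytes).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);   // 128 K (hypothetical)
    // Writes are rejected once the memstore grows past flush.size * multiplier.
    conf.setLong("hbase.hregion.memstore.block.multiplier", 4);       // 4 x 128 K = 512 K

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", -1);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", -1);
    System.out.println("Blocking limit = " + (flushSize * multiplier) + " bytes");
  }
}
```

Raising either value trades fewer rejected writes for larger flushes and more heap held in the memstore.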
2024-11-20T15:21:21,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-20T15:21:21,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-20T15:21:21,695 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-20T15:21:21,696 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7140 sec 2024-11-20T15:21:21,698 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.7240 sec 2024-11-20T15:21:21,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:21,862 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T15:21:21,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:21,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:21,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:21,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:21,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:21,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:21,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/e086e39802c7472dba7ae2637ca24071 is 50, key is test_row_0/A:col10/1732116081860/Put/seqid=0 2024-11-20T15:21:21,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741872_1048 (size=12151) 2024-11-20T15:21:21,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/e086e39802c7472dba7ae2637ca24071 2024-11-20T15:21:21,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116141887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116141890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116141891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116141893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/3ee7bb1b692c446db9faa89ddc068b23 is 50, key is test_row_0/B:col10/1732116081860/Put/seqid=0 2024-11-20T15:21:21,899 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116141896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741873_1049 (size=12151) 2024-11-20T15:21:21,916 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/3ee7bb1b692c446db9faa89ddc068b23 2024-11-20T15:21:21,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/0df48eb2ac0e4d2c966e5e67586c18fe is 50, key is test_row_0/C:col10/1732116081860/Put/seqid=0 2024-11-20T15:21:21,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741874_1050 (size=12151) 2024-11-20T15:21:21,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/0df48eb2ac0e4d2c966e5e67586c18fe 2024-11-20T15:21:21,978 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/e086e39802c7472dba7ae2637ca24071 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e086e39802c7472dba7ae2637ca24071 2024-11-20T15:21:21,992 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e086e39802c7472dba7ae2637ca24071, entries=150, sequenceid=178, filesize=11.9 K 2024-11-20T15:21:21,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/3ee7bb1b692c446db9faa89ddc068b23 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/3ee7bb1b692c446db9faa89ddc068b23 2024-11-20T15:21:21,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116141996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:21,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:21,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116141997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116141999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,002 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116142001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116142001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/3ee7bb1b692c446db9faa89ddc068b23, entries=150, sequenceid=178, filesize=11.9 K 2024-11-20T15:21:22,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/0df48eb2ac0e4d2c966e5e67586c18fe as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0df48eb2ac0e4d2c966e5e67586c18fe 2024-11-20T15:21:22,019 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0df48eb2ac0e4d2c966e5e67586c18fe, entries=150, sequenceid=178, filesize=11.9 K 2024-11-20T15:21:22,021 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for d2e9d0b7937c41ae63a82116ea698557 in 160ms, sequenceid=178, compaction requested=true 2024-11-20T15:21:22,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:22,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:22,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:22,022 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:22,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:22,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-11-20T15:21:22,022 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:22,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:22,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:22,023 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:22,024 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/A is initiating minor compaction (all files) 2024-11-20T15:21:22,024 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:22,024 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/A in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:22,024 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/B is initiating minor compaction (all files) 2024-11-20T15:21:22,024 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/031556377f1f411b9cb3ed01b88cb304, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/631c3a945bc649089ecb47e740cf19f2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e086e39802c7472dba7ae2637ca24071] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=35.9 K 2024-11-20T15:21:22,024 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/B in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
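At this point each store (A, B, C) holds three flushed HFiles, which is enough for ExploringCompactionPolicy to select all of them for a minor compaction (the eligibility threshold is governed by hbase.hstore.compaction.min, commonly 3). The compactions in this log are queued automatically by MemStoreFlusher; a client can request the same work explicitly through the Admin API. A sketch of that call, with the table and family names taken from the log and the call itself purely illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of requesting the same kind of compaction from a client; the region
// server still selects files with whichever policy is configured
// (ExploringCompactionPolicy in this run).
public class RequestCompactionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Queue a minor compaction of column family A.
      admin.compact(table, Bytes.toBytes("A"));
      // A major compaction would rewrite every store file instead:
      // admin.majorCompact(table, Bytes.toBytes("A"));
    }
  }
}
```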
2024-11-20T15:21:22,024 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/6912c869a64841409b6e19c926fd8480, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/72ec3d935be545b89b4d7aa1371f6e39, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/3ee7bb1b692c446db9faa89ddc068b23] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=35.9 K 2024-11-20T15:21:22,025 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 031556377f1f411b9cb3ed01b88cb304, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732116080227 2024-11-20T15:21:22,025 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6912c869a64841409b6e19c926fd8480, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732116080227 2024-11-20T15:21:22,026 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 631c3a945bc649089ecb47e740cf19f2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732116080904 2024-11-20T15:21:22,026 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 72ec3d935be545b89b4d7aa1371f6e39, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732116080904 2024-11-20T15:21:22,027 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting e086e39802c7472dba7ae2637ca24071, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732116081244 2024-11-20T15:21:22,027 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ee7bb1b692c446db9faa89ddc068b23, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732116081244 2024-11-20T15:21:22,046 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#A#compaction#36 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:22,048 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/f36b6f47959445089025bc7d3eeaee14 is 50, key is test_row_0/A:col10/1732116081860/Put/seqid=0 2024-11-20T15:21:22,051 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#B#compaction#37 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:22,051 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/e67fa22739f94ce29721a0be20783d41 is 50, key is test_row_0/B:col10/1732116081860/Put/seqid=0 2024-11-20T15:21:22,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741875_1051 (size=12561) 2024-11-20T15:21:22,080 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/f36b6f47959445089025bc7d3eeaee14 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/f36b6f47959445089025bc7d3eeaee14 2024-11-20T15:21:22,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741876_1052 (size=12561) 2024-11-20T15:21:22,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T15:21:22,084 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-20T15:21:22,086 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:22,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-20T15:21:22,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T15:21:22,090 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:22,091 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:22,091 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:22,099 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/A of d2e9d0b7937c41ae63a82116ea698557 into f36b6f47959445089025bc7d3eeaee14(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
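The "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed" line is the client side of the first FlushTableProcedure finishing, and pid=18 is the next flush the test client requests immediately afterwards. A minimal sketch of that client call, assuming a standard HBase Java client; as the log shows, the master executes it as a FlushTableProcedure with one FlushRegionProcedure subprocedure per region:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client-side call behind the FlushTableProcedure entries above.
public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; in this log the
      // client polls "Checking to see if procedure is done" until it completes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```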
2024-11-20T15:21:22,099 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/e67fa22739f94ce29721a0be20783d41 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e67fa22739f94ce29721a0be20783d41 2024-11-20T15:21:22,099 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:22,099 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/A, priority=13, startTime=1732116082021; duration=0sec 2024-11-20T15:21:22,099 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:22,099 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:A 2024-11-20T15:21:22,100 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:22,102 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:22,102 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/C is initiating minor compaction (all files) 2024-11-20T15:21:22,102 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/C in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
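The compaction of store A rewrote three ~12 K flush files into a single 12.3 K file, and PressureAwareThroughputController reports the I/O cap it applied ("total limit is 50.00 MB/second"). That ceiling is derived from the compaction throughput bounds; a sketch of the keys that are assumed to govern it follows, with illustrative byte values rather than this test's actual configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative settings for the compaction throughput window used by the
// pressure-aware throughput controller; values here are examples only.
public class CompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
    System.out.println("Compaction throughput window: "
        + conf.getLong("hbase.hstore.compaction.throughput.lower.bound", -1) + " - "
        + conf.getLong("hbase.hstore.compaction.throughput.higher.bound", -1) + " bytes/s");
  }
}
```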
2024-11-20T15:21:22,102 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/1d5f8de737f24dc1944028b8b44ffd55, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d18957eac61a4602bdf23dbd45035b46, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0df48eb2ac0e4d2c966e5e67586c18fe] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=35.9 K 2024-11-20T15:21:22,104 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d5f8de737f24dc1944028b8b44ffd55, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732116080227 2024-11-20T15:21:22,104 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d18957eac61a4602bdf23dbd45035b46, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732116080904 2024-11-20T15:21:22,105 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0df48eb2ac0e4d2c966e5e67586c18fe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732116081244 2024-11-20T15:21:22,108 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/B of d2e9d0b7937c41ae63a82116ea698557 into e67fa22739f94ce29721a0be20783d41(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:22,108 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:22,108 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/B, priority=13, startTime=1732116082022; duration=0sec 2024-11-20T15:21:22,108 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:22,108 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:B 2024-11-20T15:21:22,136 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#C#compaction#38 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:22,137 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/47309bd3a02640259056800de3153bad is 50, key is test_row_0/C:col10/1732116081860/Put/seqid=0 2024-11-20T15:21:22,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741877_1053 (size=12561) 2024-11-20T15:21:22,169 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/47309bd3a02640259056800de3153bad as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/47309bd3a02640259056800de3153bad 2024-11-20T15:21:22,182 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/C of d2e9d0b7937c41ae63a82116ea698557 into 47309bd3a02640259056800de3153bad(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:22,182 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:22,182 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/C, priority=13, startTime=1732116082022; duration=0sec 2024-11-20T15:21:22,182 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:22,182 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:C 2024-11-20T15:21:22,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T15:21:22,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:22,205 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T15:21:22,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:22,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:22,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:22,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:22,205 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:22,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:22,211 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/2d16e88be4b740c99ae04f8643eae81b is 50, key is test_row_0/A:col10/1732116081890/Put/seqid=0 2024-11-20T15:21:22,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116142222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116142222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116142224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116142227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116142229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741878_1054 (size=16931) 2024-11-20T15:21:22,242 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/2d16e88be4b740c99ae04f8643eae81b 2024-11-20T15:21:22,244 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T15:21:22,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:22,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:22,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:22,246 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:22,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:22,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:22,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/7aadc893b1da4b3b919f96ae17b740a1 is 50, key is test_row_0/B:col10/1732116081890/Put/seqid=0 2024-11-20T15:21:22,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741879_1055 (size=12151) 2024-11-20T15:21:22,288 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/7aadc893b1da4b3b919f96ae17b740a1 2024-11-20T15:21:22,307 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/2786c5b037e54855afe55cb1e8c5cd65 is 50, key is test_row_0/C:col10/1732116081890/Put/seqid=0 2024-11-20T15:21:22,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741880_1056 (size=12151) 2024-11-20T15:21:22,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116142331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116142332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116142332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116142333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116142333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,335 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/2786c5b037e54855afe55cb1e8c5cd65 2024-11-20T15:21:22,345 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/2d16e88be4b740c99ae04f8643eae81b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/2d16e88be4b740c99ae04f8643eae81b 2024-11-20T15:21:22,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/2d16e88be4b740c99ae04f8643eae81b, entries=250, sequenceid=201, filesize=16.5 K 2024-11-20T15:21:22,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/7aadc893b1da4b3b919f96ae17b740a1 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/7aadc893b1da4b3b919f96ae17b740a1 2024-11-20T15:21:22,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/7aadc893b1da4b3b919f96ae17b740a1, entries=150, sequenceid=201, filesize=11.9 K 2024-11-20T15:21:22,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/2786c5b037e54855afe55cb1e8c5cd65 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/2786c5b037e54855afe55cb1e8c5cd65 2024-11-20T15:21:22,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/2786c5b037e54855afe55cb1e8c5cd65, entries=150, sequenceid=201, filesize=11.9 K 2024-11-20T15:21:22,376 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for d2e9d0b7937c41ae63a82116ea698557 in 171ms, sequenceid=201, compaction requested=false 2024-11-20T15:21:22,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:22,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T15:21:22,400 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,401 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T15:21:22,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:22,401 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T15:21:22,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:22,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:22,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:22,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:22,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:22,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:22,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/6e085904f79f492eb0baa32ac5b45824 is 50, key is test_row_0/A:col10/1732116082220/Put/seqid=0 2024-11-20T15:21:22,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741881_1057 (size=12151) 2024-11-20T15:21:22,436 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/6e085904f79f492eb0baa32ac5b45824 2024-11-20T15:21:22,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/a7e24be5eb8f4a6cb797be641fd22a80 is 50, key is test_row_0/B:col10/1732116082220/Put/seqid=0 2024-11-20T15:21:22,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741882_1058 (size=12151) 2024-11-20T15:21:22,488 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=217 (bloomFilter=true), 
to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/a7e24be5eb8f4a6cb797be641fd22a80 2024-11-20T15:21:22,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/0feaaf6776df4284889e4a8708dd2f5a is 50, key is test_row_0/C:col10/1732116082220/Put/seqid=0 2024-11-20T15:21:22,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741883_1059 (size=12151) 2024-11-20T15:21:22,529 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/0feaaf6776df4284889e4a8708dd2f5a 2024-11-20T15:21:22,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:22,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:22,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/6e085904f79f492eb0baa32ac5b45824 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6e085904f79f492eb0baa32ac5b45824 2024-11-20T15:21:22,556 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6e085904f79f492eb0baa32ac5b45824, entries=150, sequenceid=217, filesize=11.9 K 2024-11-20T15:21:22,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/a7e24be5eb8f4a6cb797be641fd22a80 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/a7e24be5eb8f4a6cb797be641fd22a80 2024-11-20T15:21:22,567 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/a7e24be5eb8f4a6cb797be641fd22a80, entries=150, sequenceid=217, filesize=11.9 K 2024-11-20T15:21:22,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 
{event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/0feaaf6776df4284889e4a8708dd2f5a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0feaaf6776df4284889e4a8708dd2f5a 2024-11-20T15:21:22,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116142569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,578 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0feaaf6776df4284889e4a8708dd2f5a, entries=150, sequenceid=217, filesize=11.9 K 2024-11-20T15:21:22,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116142569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116142570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,580 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for d2e9d0b7937c41ae63a82116ea698557 in 179ms, sequenceid=217, compaction requested=true 2024-11-20T15:21:22,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:22,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:22,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-20T15:21:22,581 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T15:21:22,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:22,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:22,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:22,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:22,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:22,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:22,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:22,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-20T15:21:22,587 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-20T15:21:22,587 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 492 msec 2024-11-20T15:21:22,590 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 501 msec 2024-11-20T15:21:22,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/832b4d46ba5e43069beab8867d5a76cb is 50, key is test_row_0/A:col10/1732116082579/Put/seqid=0 2024-11-20T15:21:22,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116142607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116142609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741884_1060 (size=16931) 2024-11-20T15:21:22,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116142679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116142680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116142681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T15:21:22,694 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-20T15:21:22,696 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:22,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-20T15:21:22,699 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:22,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T15:21:22,701 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:22,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:22,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116142712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116142713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T15:21:22,852 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,853 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T15:21:22,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:22,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:22,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:22,853 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:22,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:22,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:22,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116142884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116142884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116142885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116142916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:22,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:22,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116142917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T15:21:23,008 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T15:21:23,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:23,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:23,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:23,010 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:23,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:23,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:23,023 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/832b4d46ba5e43069beab8867d5a76cb 2024-11-20T15:21:23,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/76eebf1f26be4d00a7d8f9a0145f64a5 is 50, key is test_row_0/B:col10/1732116082579/Put/seqid=0 2024-11-20T15:21:23,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741885_1061 (size=12151) 2024-11-20T15:21:23,060 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/76eebf1f26be4d00a7d8f9a0145f64a5 2024-11-20T15:21:23,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/044c4f3b89b14a5698e3fe509c10c1cb is 50, key is test_row_0/C:col10/1732116082579/Put/seqid=0 2024-11-20T15:21:23,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741886_1062 (size=12151) 2024-11-20T15:21:23,094 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/044c4f3b89b14a5698e3fe509c10c1cb 2024-11-20T15:21:23,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/832b4d46ba5e43069beab8867d5a76cb as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/832b4d46ba5e43069beab8867d5a76cb 2024-11-20T15:21:23,113 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/832b4d46ba5e43069beab8867d5a76cb, entries=250, sequenceid=240, filesize=16.5 K 2024-11-20T15:21:23,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/76eebf1f26be4d00a7d8f9a0145f64a5 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/76eebf1f26be4d00a7d8f9a0145f64a5 2024-11-20T15:21:23,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/76eebf1f26be4d00a7d8f9a0145f64a5, entries=150, sequenceid=240, filesize=11.9 K 2024-11-20T15:21:23,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/044c4f3b89b14a5698e3fe509c10c1cb as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/044c4f3b89b14a5698e3fe509c10c1cb 2024-11-20T15:21:23,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/044c4f3b89b14a5698e3fe509c10c1cb, entries=150, sequenceid=240, filesize=11.9 K 2024-11-20T15:21:23,134 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for d2e9d0b7937c41ae63a82116ea698557 in 553ms, sequenceid=240, compaction requested=true 2024-11-20T15:21:23,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:23,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:23,134 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:21:23,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:23,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:23,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:23,134 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:21:23,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:23,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:23,136 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 58574 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:21:23,136 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/A is initiating minor compaction (all files) 2024-11-20T15:21:23,136 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:21:23,136 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/A in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:23,136 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/B is initiating minor compaction (all files) 2024-11-20T15:21:23,136 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/f36b6f47959445089025bc7d3eeaee14, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/2d16e88be4b740c99ae04f8643eae81b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6e085904f79f492eb0baa32ac5b45824, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/832b4d46ba5e43069beab8867d5a76cb] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=57.2 K 2024-11-20T15:21:23,136 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/B in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:23,136 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e67fa22739f94ce29721a0be20783d41, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/7aadc893b1da4b3b919f96ae17b740a1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/a7e24be5eb8f4a6cb797be641fd22a80, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/76eebf1f26be4d00a7d8f9a0145f64a5] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=47.9 K 2024-11-20T15:21:23,137 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting f36b6f47959445089025bc7d3eeaee14, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732116081244 2024-11-20T15:21:23,137 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting e67fa22739f94ce29721a0be20783d41, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732116081244 2024-11-20T15:21:23,137 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 7aadc893b1da4b3b919f96ae17b740a1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732116081887 2024-11-20T15:21:23,138 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d16e88be4b740c99ae04f8643eae81b, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732116081887 2024-11-20T15:21:23,138 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting a7e24be5eb8f4a6cb797be641fd22a80, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732116082217 2024-11-20T15:21:23,138 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e085904f79f492eb0baa32ac5b45824, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732116082217 2024-11-20T15:21:23,139 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 76eebf1f26be4d00a7d8f9a0145f64a5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732116082562 2024-11-20T15:21:23,140 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 832b4d46ba5e43069beab8867d5a76cb, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732116082562 2024-11-20T15:21:23,158 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#A#compaction#48 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:23,159 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/9ee08841ae6a4aa587b9ff7bb1b02e7f is 50, key is test_row_0/A:col10/1732116082579/Put/seqid=0 2024-11-20T15:21:23,159 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#B#compaction#49 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:23,160 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/3566fdb8701b4064aebd442f71a302c4 is 50, key is test_row_0/B:col10/1732116082579/Put/seqid=0 2024-11-20T15:21:23,165 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,165 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T15:21:23,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:23,166 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T15:21:23,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:23,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:23,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:23,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:23,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:23,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:23,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/abf38656a12d4724a72effd5c7aa553c is 50, key is test_row_0/A:col10/1732116082605/Put/seqid=0 2024-11-20T15:21:23,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:23,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:23,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741888_1064 (size=12697) 2024-11-20T15:21:23,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741887_1063 (size=12697) 2024-11-20T15:21:23,217 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/3566fdb8701b4064aebd442f71a302c4 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/3566fdb8701b4064aebd442f71a302c4 2024-11-20T15:21:23,229 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/B of d2e9d0b7937c41ae63a82116ea698557 into 3566fdb8701b4064aebd442f71a302c4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:21:23,229 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:23,229 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/B, priority=12, startTime=1732116083134; duration=0sec 2024-11-20T15:21:23,229 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:23,229 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:B 2024-11-20T15:21:23,229 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:21:23,233 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:21:23,233 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/C is initiating minor compaction (all files) 2024-11-20T15:21:23,233 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/C in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:23,233 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/47309bd3a02640259056800de3153bad, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/2786c5b037e54855afe55cb1e8c5cd65, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0feaaf6776df4284889e4a8708dd2f5a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/044c4f3b89b14a5698e3fe509c10c1cb] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=47.9 K 2024-11-20T15:21:23,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741889_1065 (size=12151) 2024-11-20T15:21:23,235 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/abf38656a12d4724a72effd5c7aa553c 2024-11-20T15:21:23,235 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 
47309bd3a02640259056800de3153bad, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732116081244 2024-11-20T15:21:23,236 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 2786c5b037e54855afe55cb1e8c5cd65, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732116081887 2024-11-20T15:21:23,237 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 0feaaf6776df4284889e4a8708dd2f5a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732116082217 2024-11-20T15:21:23,239 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 044c4f3b89b14a5698e3fe509c10c1cb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732116082562 2024-11-20T15:21:23,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116143232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116143234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116143235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/bb628b46a87a4df2aaf5dfe520299475 is 50, key is test_row_0/B:col10/1732116082605/Put/seqid=0 2024-11-20T15:21:23,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116143245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116143247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741890_1066 (size=12151) 2024-11-20T15:21:23,268 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/bb628b46a87a4df2aaf5dfe520299475 2024-11-20T15:21:23,279 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#C#compaction#52 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:23,280 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/4f7e05fb9adf4647b060b9a1482999f6 is 50, key is test_row_0/C:col10/1732116082579/Put/seqid=0 2024-11-20T15:21:23,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/ce93df6a2856427794067fd54adb4aff is 50, key is test_row_0/C:col10/1732116082605/Put/seqid=0 2024-11-20T15:21:23,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T15:21:23,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741891_1067 (size=12697) 2024-11-20T15:21:23,332 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/4f7e05fb9adf4647b060b9a1482999f6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/4f7e05fb9adf4647b060b9a1482999f6 2024-11-20T15:21:23,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741892_1068 (size=12151) 2024-11-20T15:21:23,343 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/C of d2e9d0b7937c41ae63a82116ea698557 into 4f7e05fb9adf4647b060b9a1482999f6(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:23,344 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:23,344 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/C, priority=12, startTime=1732116083134; duration=0sec 2024-11-20T15:21:23,344 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:23,344 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:C 2024-11-20T15:21:23,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116143346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116143347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116143348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116143353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116143353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116143554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116143555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116143555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116143556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116143557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,612 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/9ee08841ae6a4aa587b9ff7bb1b02e7f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/9ee08841ae6a4aa587b9ff7bb1b02e7f 2024-11-20T15:21:23,626 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/A of d2e9d0b7937c41ae63a82116ea698557 into 9ee08841ae6a4aa587b9ff7bb1b02e7f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:21:23,626 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:23,626 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/A, priority=12, startTime=1732116083134; duration=0sec 2024-11-20T15:21:23,627 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:23,627 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:A 2024-11-20T15:21:23,741 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/ce93df6a2856427794067fd54adb4aff 2024-11-20T15:21:23,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/abf38656a12d4724a72effd5c7aa553c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/abf38656a12d4724a72effd5c7aa553c 2024-11-20T15:21:23,756 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/abf38656a12d4724a72effd5c7aa553c, entries=150, sequenceid=253, filesize=11.9 K 2024-11-20T15:21:23,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/bb628b46a87a4df2aaf5dfe520299475 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bb628b46a87a4df2aaf5dfe520299475 2024-11-20T15:21:23,763 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bb628b46a87a4df2aaf5dfe520299475, entries=150, sequenceid=253, filesize=11.9 K 2024-11-20T15:21:23,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/ce93df6a2856427794067fd54adb4aff 
as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/ce93df6a2856427794067fd54adb4aff 2024-11-20T15:21:23,772 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/ce93df6a2856427794067fd54adb4aff, entries=150, sequenceid=253, filesize=11.9 K 2024-11-20T15:21:23,773 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for d2e9d0b7937c41ae63a82116ea698557 in 607ms, sequenceid=253, compaction requested=false 2024-11-20T15:21:23,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:23,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:23,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-20T15:21:23,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-20T15:21:23,778 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-20T15:21:23,778 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0740 sec 2024-11-20T15:21:23,781 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.0830 sec 2024-11-20T15:21:23,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T15:21:23,804 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-20T15:21:23,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:23,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-20T15:21:23,811 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:23,812 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:23,812 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:23,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T15:21:23,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:23,864 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T15:21:23,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:23,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:23,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:23,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:23,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:23,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:23,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/79990afdd85c42bfa21cbb383d79e281 is 50, key is test_row_0/A:col10/1732116083863/Put/seqid=0 2024-11-20T15:21:23,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116143870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116143871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116143876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741893_1069 (size=12301) 2024-11-20T15:21:23,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116143877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,881 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/79990afdd85c42bfa21cbb383d79e281 2024-11-20T15:21:23,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116143881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/8d9a61d96091471294f8b692437679cf is 50, key is test_row_0/B:col10/1732116083863/Put/seqid=0 2024-11-20T15:21:23,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741894_1070 (size=12301) 2024-11-20T15:21:23,901 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/8d9a61d96091471294f8b692437679cf 2024-11-20T15:21:23,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/f41e6785491b45068585b259142faa44 is 50, key is test_row_0/C:col10/1732116083863/Put/seqid=0 2024-11-20T15:21:23,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T15:21:23,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741895_1071 (size=12301) 2024-11-20T15:21:23,966 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,966 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T15:21:23,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:23,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
as already flushing 2024-11-20T15:21:23,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:23,967 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:23,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:23,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:23,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116143979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116143979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116143981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116143982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:23,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:23,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116143986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T15:21:24,121 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T15:21:24,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:24,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:24,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:24,122 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:24,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:24,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:24,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116144184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116144184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116144184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116144185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116144194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,275 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T15:21:24,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:24,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:24,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:24,277 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:24,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:24,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:24,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/f41e6785491b45068585b259142faa44 2024-11-20T15:21:24,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/79990afdd85c42bfa21cbb383d79e281 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/79990afdd85c42bfa21cbb383d79e281 2024-11-20T15:21:24,337 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/79990afdd85c42bfa21cbb383d79e281, entries=150, sequenceid=283, filesize=12.0 K 2024-11-20T15:21:24,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/8d9a61d96091471294f8b692437679cf as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8d9a61d96091471294f8b692437679cf 2024-11-20T15:21:24,346 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8d9a61d96091471294f8b692437679cf, entries=150, 
sequenceid=283, filesize=12.0 K 2024-11-20T15:21:24,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/f41e6785491b45068585b259142faa44 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f41e6785491b45068585b259142faa44 2024-11-20T15:21:24,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f41e6785491b45068585b259142faa44, entries=150, sequenceid=283, filesize=12.0 K 2024-11-20T15:21:24,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for d2e9d0b7937c41ae63a82116ea698557 in 495ms, sequenceid=283, compaction requested=true 2024-11-20T15:21:24,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:24,358 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:24,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:24,360 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:24,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:24,360 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/A is initiating minor compaction (all files) 2024-11-20T15:21:24,361 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:24,361 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/A in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
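The "Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0" lines above come from compactions.ExploringCompactionPolicy, which only compacts a window of store files if no file in the window is disproportionately larger than the rest. The sketch below is a deliberately simplified illustration of that ratio test, not the real policy; the class name, parameters and sizes are made up for the example (the three flushed files above were roughly 12.4 K, 11.9 K and 12.0 K, 37149 bytes in total).

```java
import java.util.ArrayList;
import java.util.List;

/** Simplified illustration of ratio-based minor-compaction selection.
 *  Not the real ExploringCompactionPolicy; names and values are illustrative. */
public class RatioSelectionSketch {

    /** True if every file in [start, end) is at most `ratio` times the summed size of the others. */
    static boolean withinRatio(long[] sizes, int start, int end, double ratio) {
        long total = 0;
        for (int i = start; i < end; i++) {
            total += sizes[i];
        }
        for (int i = start; i < end; i++) {
            if (sizes[i] > ratio * (total - sizes[i])) {
                return false;
            }
        }
        return true;
    }

    /** Picks the largest contiguous window of store-file sizes that passes the ratio test. */
    static List<Long> select(long[] sizes, int minFiles, int maxFiles, double ratio) {
        int bestStart = -1, bestLen = 0;
        for (int start = 0; start < sizes.length; start++) {
            for (int end = start + minFiles; end <= Math.min(sizes.length, start + maxFiles); end++) {
                if (withinRatio(sizes, start, end, ratio) && end - start > bestLen) {
                    bestStart = start;
                    bestLen = end - start;
                }
            }
        }
        List<Long> picked = new ArrayList<>();
        for (int i = 0; bestStart >= 0 && i < bestLen; i++) {
            picked.add(sizes[bestStart + i]);
        }
        return picked;
    }

    public static void main(String[] args) {
        // Three similarly sized flushed files (illustrative byte counts summing to 37149);
        // with a ratio of 1.2 (the commonly used default) they all fit in one window.
        long[] sizes = {12_700, 12_200, 12_249};
        System.out.println(select(sizes, 3, 10, 1.2));
    }
}
```

Because the three files are of similar size, the ratio test keeps the whole window, matching the "3 files ... after considering 1 permutations with 1 in ratio" decision logged above.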
2024-11-20T15:21:24,361 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/9ee08841ae6a4aa587b9ff7bb1b02e7f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/abf38656a12d4724a72effd5c7aa553c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/79990afdd85c42bfa21cbb383d79e281] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=36.3 K 2024-11-20T15:21:24,362 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ee08841ae6a4aa587b9ff7bb1b02e7f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732116082562 2024-11-20T15:21:24,362 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:24,362 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/B is initiating minor compaction (all files) 2024-11-20T15:21:24,362 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/B in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:24,362 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/3566fdb8701b4064aebd442f71a302c4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bb628b46a87a4df2aaf5dfe520299475, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8d9a61d96091471294f8b692437679cf] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=36.3 K 2024-11-20T15:21:24,363 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting abf38656a12d4724a72effd5c7aa553c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732116082597 2024-11-20T15:21:24,363 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 3566fdb8701b4064aebd442f71a302c4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732116082562 2024-11-20T15:21:24,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:24,364 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79990afdd85c42bfa21cbb383d79e281, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1732116083233 2024-11-20T15:21:24,364 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting bb628b46a87a4df2aaf5dfe520299475, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732116082597 2024-11-20T15:21:24,365 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d9a61d96091471294f8b692437679cf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1732116083233 2024-11-20T15:21:24,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:24,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:24,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:24,383 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#A#compaction#57 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:24,383 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/faeb1a637f2f477c994bf8918709cab3 is 50, key is test_row_0/A:col10/1732116083863/Put/seqid=0 2024-11-20T15:21:24,386 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#B#compaction#58 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:24,387 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/9a57fce5413f4594b65d08206ddac29e is 50, key is test_row_0/B:col10/1732116083863/Put/seqid=0 2024-11-20T15:21:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T15:21:24,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741896_1072 (size=12949) 2024-11-20T15:21:24,432 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T15:21:24,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
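The FlushRegionCallable activity above (pid=23) is the per-region leg of the table-level flush procedure pid=22 that the master keeps polling ("Checking to see if procedure is done pid=22"). The log does not show who requested the flush; the TestAcidGuarantees harness drives it internally. The same kind of flush can be requested from application code through the Admin API, as in this minimal, hedged sketch (the cluster connection settings are assumed to come from the classpath configuration):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/** Requests a table flush; the master coordinates the per-region work,
 *  and a region server may briefly report "already flushing" and be retried,
 *  as seen in the log above. */
public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```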
2024-11-20T15:21:24,433 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T15:21:24,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:24,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:24,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:24,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:24,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:24,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:24,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741897_1073 (size=12949) 2024-11-20T15:21:24,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/aba26917ddb240e6b23843ee2a1af626 is 50, key is test_row_0/A:col10/1732116083875/Put/seqid=0 2024-11-20T15:21:24,448 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/faeb1a637f2f477c994bf8918709cab3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/faeb1a637f2f477c994bf8918709cab3 2024-11-20T15:21:24,448 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/9a57fce5413f4594b65d08206ddac29e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/9a57fce5413f4594b65d08206ddac29e 2024-11-20T15:21:24,456 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/A of d2e9d0b7937c41ae63a82116ea698557 into faeb1a637f2f477c994bf8918709cab3(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
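The "Over memstore limit=512.0 K" rejections earlier and the flush of all three column families above are two halves of the same back-pressure loop: writes are refused while the region's memstore sits above its blocking limit, and they are accepted again once flushes drain it. The blocking limit is the memstore flush size times a block multiplier; for example, a 128 K flush size with a multiplier of 4 (the usual default) would yield the 512 K limit seen here, though the exact values this test configures are not visible in the log. The sketch below illustrates only the shape of that check; the class and method bodies are invented and are not the real HRegion.checkResources code.

```java
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

/** Much-simplified illustration of memstore back-pressure.
 *  Invented names; not the real org.apache.hadoop.hbase.regionserver.HRegion code. */
public class MemstoreBackPressureSketch {

    // Illustrative values that would reproduce the 512.0 K limit in the log:
    // hbase.hregion.memstore.flush.size = 128 KB, hbase.hregion.memstore.block.multiplier = 4.
    static final long FLUSH_SIZE_BYTES = 128 * 1024;
    static final int BLOCK_MULTIPLIER = 4;
    static final long BLOCKING_LIMIT = FLUSH_SIZE_BYTES * BLOCK_MULTIPLIER; // 512 KB

    final AtomicLong memstoreSize = new AtomicLong();

    /** Called before applying a mutation, mirroring the role of checkResources(). */
    void checkResources(String regionName) throws IOException {
        long current = memstoreSize.get();
        if (current > BLOCKING_LIMIT) {
            requestFlush();                 // ask the flusher to drain the memstore
            throw new IOException(          // HBase throws RegionTooBusyException at this point
                "Over memstore limit=" + (BLOCKING_LIMIT / 1024) + " K, regionName=" + regionName);
        }
        if (current > FLUSH_SIZE_BYTES) {
            requestFlush();                 // flush early, but still accept the write
        }
    }

    void applyMutation(String regionName, long mutationBytes) throws IOException {
        checkResources(regionName);
        memstoreSize.addAndGet(mutationBytes);  // counted against the limit until flushed
    }

    void requestFlush() {
        // In HBase this queues the region with the MemStoreFlusher; omitted here.
    }
}
```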
2024-11-20T15:21:24,456 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:24,456 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/A, priority=13, startTime=1732116084358; duration=0sec 2024-11-20T15:21:24,456 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:24,456 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:A 2024-11-20T15:21:24,457 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:24,459 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/B of d2e9d0b7937c41ae63a82116ea698557 into 9a57fce5413f4594b65d08206ddac29e(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:24,459 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:24,459 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/B, priority=13, startTime=1732116084360; duration=0sec 2024-11-20T15:21:24,459 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:24,459 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:B 2024-11-20T15:21:24,459 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:24,460 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/C is initiating minor compaction (all files) 2024-11-20T15:21:24,460 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/C in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:24,460 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/4f7e05fb9adf4647b060b9a1482999f6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/ce93df6a2856427794067fd54adb4aff, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f41e6785491b45068585b259142faa44] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=36.3 K 2024-11-20T15:21:24,460 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f7e05fb9adf4647b060b9a1482999f6, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732116082562 2024-11-20T15:21:24,461 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce93df6a2856427794067fd54adb4aff, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732116082597 2024-11-20T15:21:24,461 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting f41e6785491b45068585b259142faa44, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1732116083233 2024-11-20T15:21:24,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741898_1074 (size=12301) 2024-11-20T15:21:24,465 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/aba26917ddb240e6b23843ee2a1af626 2024-11-20T15:21:24,477 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#C#compaction#60 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:24,478 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/41a5afc92b9b41749a351ec0fc1ff862 is 50, key is test_row_0/C:col10/1732116083863/Put/seqid=0 2024-11-20T15:21:24,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/2bd33984da0c4c699e50a61b81e986d9 is 50, key is test_row_0/B:col10/1732116083875/Put/seqid=0 2024-11-20T15:21:24,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:24,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:24,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741899_1075 (size=12949) 2024-11-20T15:21:24,510 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/41a5afc92b9b41749a351ec0fc1ff862 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/41a5afc92b9b41749a351ec0fc1ff862 2024-11-20T15:21:24,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741900_1076 (size=12301) 2024-11-20T15:21:24,512 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/2bd33984da0c4c699e50a61b81e986d9 2024-11-20T15:21:24,524 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/C of d2e9d0b7937c41ae63a82116ea698557 into 41a5afc92b9b41749a351ec0fc1ff862(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
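The "average throughput is 3.28 MB/second, slept 0 time(s) ... total limit is 50.00 MB/second" and "6.55 MB/second" lines above come from throttle.PressureAwareThroughputController, which slows compaction writes down when they would exceed the configured limit. The sketch below shows only the underlying idea (sleep whenever bytes have been written faster than the allowed rate); the names are invented and this is not the HBase implementation.

```java
/** Minimal sketch of rate-limiting writes during a compaction.
 *  Invented names; not the real PressureAwareThroughputController. */
public class ThroughputThrottleSketch {

    private final double maxBytesPerSecond;
    private long bytesWritten;
    private final long startNanos = System.nanoTime();

    public ThroughputThrottleSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    /** Call after writing a chunk; sleeps if we are ahead of the allowed rate. */
    public void control(long chunkBytes) throws InterruptedException {
        bytesWritten += chunkBytes;
        double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
        double earliestAllowedSeconds = bytesWritten / maxBytesPerSecond;
        double aheadBySeconds = earliestAllowedSeconds - elapsedSeconds;
        if (aheadBySeconds > 0) {
            Thread.sleep((long) (aheadBySeconds * 1000));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50.0 * 1024 * 1024);
        // Writing 4 MB in 64 KB chunks in a tight loop exceeds 50 MB/s, so the throttle
        // sleeps roughly 80 ms in total to hold the rate at the limit.
        for (int i = 0; i < 64; i++) {
            throttle.control(64 * 1024);
        }
        System.out.println("done");
    }
}
```

In the log both compactions report "slept 0 time(s)", i.e. at ~3.28 and ~6.55 MB/s they never got ahead of the 50 MB/s budget, so the sleep branch was never taken there.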
2024-11-20T15:21:24,524 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:24,525 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/C, priority=13, startTime=1732116084368; duration=0sec 2024-11-20T15:21:24,525 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:24,525 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:C 2024-11-20T15:21:24,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/6ba83c1cea974c7da38ddcea33946470 is 50, key is test_row_0/C:col10/1732116083875/Put/seqid=0 2024-11-20T15:21:24,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116144523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116144525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116144525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116144528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116144529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741901_1077 (size=12301) 2024-11-20T15:21:24,557 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/6ba83c1cea974c7da38ddcea33946470 2024-11-20T15:21:24,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/aba26917ddb240e6b23843ee2a1af626 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/aba26917ddb240e6b23843ee2a1af626 2024-11-20T15:21:24,572 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/aba26917ddb240e6b23843ee2a1af626, entries=150, sequenceid=293, filesize=12.0 K 2024-11-20T15:21:24,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/2bd33984da0c4c699e50a61b81e986d9 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/2bd33984da0c4c699e50a61b81e986d9 2024-11-20T15:21:24,581 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 
{event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/2bd33984da0c4c699e50a61b81e986d9, entries=150, sequenceid=293, filesize=12.0 K 2024-11-20T15:21:24,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/6ba83c1cea974c7da38ddcea33946470 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/6ba83c1cea974c7da38ddcea33946470 2024-11-20T15:21:24,589 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/6ba83c1cea974c7da38ddcea33946470, entries=150, sequenceid=293, filesize=12.0 K 2024-11-20T15:21:24,591 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for d2e9d0b7937c41ae63a82116ea698557 in 158ms, sequenceid=293, compaction requested=false 2024-11-20T15:21:24,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:24,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:24,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-20T15:21:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-20T15:21:24,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-20T15:21:24,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 782 msec 2024-11-20T15:21:24,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 790 msec 2024-11-20T15:21:24,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:24,636 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T15:21:24,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:24,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:24,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:24,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:24,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:24,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:24,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/62eace6fb58c47379608e773cd613f67 is 50, key is test_row_0/A:col10/1732116084525/Put/seqid=0 2024-11-20T15:21:24,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116144641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116144643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116144644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116144646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,648 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116144646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741902_1078 (size=17181) 2024-11-20T15:21:24,657 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/62eace6fb58c47379608e773cd613f67 2024-11-20T15:21:24,667 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/e7edb6ff62984600ac4dae524ba544c4 is 50, key is test_row_0/B:col10/1732116084525/Put/seqid=0 2024-11-20T15:21:24,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741903_1079 (size=12301) 2024-11-20T15:21:24,685 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/e7edb6ff62984600ac4dae524ba544c4 2024-11-20T15:21:24,698 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/f39e956b59f64e5d89063264a91b6056 is 50, key is test_row_0/C:col10/1732116084525/Put/seqid=0 2024-11-20T15:21:24,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741904_1080 (size=12301) 2024-11-20T15:21:24,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116144747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116144748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116144748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116144749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116144751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T15:21:24,920 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-20T15:21:24,922 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:24,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-20T15:21:24,924 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:24,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T15:21:24,926 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:24,926 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:24,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116144951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116144952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116144954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116144954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:24,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:24,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116144956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T15:21:25,078 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T15:21:25,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:25,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:25,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:25,080 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:25,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:25,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:25,106 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/f39e956b59f64e5d89063264a91b6056 2024-11-20T15:21:25,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/62eace6fb58c47379608e773cd613f67 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/62eace6fb58c47379608e773cd613f67 2024-11-20T15:21:25,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/62eace6fb58c47379608e773cd613f67, entries=250, sequenceid=325, filesize=16.8 K 2024-11-20T15:21:25,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/e7edb6ff62984600ac4dae524ba544c4 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e7edb6ff62984600ac4dae524ba544c4 2024-11-20T15:21:25,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e7edb6ff62984600ac4dae524ba544c4, entries=150, 
sequenceid=325, filesize=12.0 K 2024-11-20T15:21:25,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/f39e956b59f64e5d89063264a91b6056 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f39e956b59f64e5d89063264a91b6056 2024-11-20T15:21:25,144 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f39e956b59f64e5d89063264a91b6056, entries=150, sequenceid=325, filesize=12.0 K 2024-11-20T15:21:25,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for d2e9d0b7937c41ae63a82116ea698557 in 510ms, sequenceid=325, compaction requested=true 2024-11-20T15:21:25,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:25,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:25,146 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:25,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:25,146 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:25,148 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:25,148 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/B is initiating minor compaction (all files) 2024-11-20T15:21:25,148 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/B in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:25,148 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/9a57fce5413f4594b65d08206ddac29e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/2bd33984da0c4c699e50a61b81e986d9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e7edb6ff62984600ac4dae524ba544c4] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=36.7 K 2024-11-20T15:21:25,150 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42431 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:25,150 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/A is initiating minor compaction (all files) 2024-11-20T15:21:25,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:25,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:25,150 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/A in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:25,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:25,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:25,150 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/faeb1a637f2f477c994bf8918709cab3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/aba26917ddb240e6b23843ee2a1af626, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/62eace6fb58c47379608e773cd613f67] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=41.4 K 2024-11-20T15:21:25,150 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a57fce5413f4594b65d08206ddac29e, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1732116083233 2024-11-20T15:21:25,151 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting faeb1a637f2f477c994bf8918709cab3, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1732116083233 2024-11-20T15:21:25,151 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting aba26917ddb240e6b23843ee2a1af626, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732116083868 2024-11-20T15:21:25,151 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bd33984da0c4c699e50a61b81e986d9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732116083868 2024-11-20T15:21:25,152 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62eace6fb58c47379608e773cd613f67, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732116084523 2024-11-20T15:21:25,152 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting e7edb6ff62984600ac4dae524ba544c4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732116084525 2024-11-20T15:21:25,180 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#B#compaction#67 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:25,181 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/1225b51efe834f6fbf7c458626a90272 is 50, key is test_row_0/B:col10/1732116084525/Put/seqid=0 2024-11-20T15:21:25,182 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#A#compaction#66 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:25,184 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/887cfd89e7094ea0a171ee44b0b05d02 is 50, key is test_row_0/A:col10/1732116084525/Put/seqid=0 2024-11-20T15:21:25,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741905_1081 (size=13051) 2024-11-20T15:21:25,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741906_1082 (size=13051) 2024-11-20T15:21:25,209 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/1225b51efe834f6fbf7c458626a90272 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/1225b51efe834f6fbf7c458626a90272 2024-11-20T15:21:25,216 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/B of d2e9d0b7937c41ae63a82116ea698557 into 1225b51efe834f6fbf7c458626a90272(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:21:25,217 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:25,217 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/B, priority=13, startTime=1732116085146; duration=0sec 2024-11-20T15:21:25,218 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:25,218 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:B 2024-11-20T15:21:25,218 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:25,226 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:25,226 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/C is initiating minor compaction (all files) 2024-11-20T15:21:25,227 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/C in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:25,227 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/41a5afc92b9b41749a351ec0fc1ff862, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/6ba83c1cea974c7da38ddcea33946470, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f39e956b59f64e5d89063264a91b6056] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=36.7 K 2024-11-20T15:21:25,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T15:21:25,230 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 41a5afc92b9b41749a351ec0fc1ff862, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1732116083233 2024-11-20T15:21:25,231 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ba83c1cea974c7da38ddcea33946470, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732116083868 2024-11-20T15:21:25,231 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting f39e956b59f64e5d89063264a91b6056, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, 
seqNum=325, earliestPutTs=1732116084525 2024-11-20T15:21:25,233 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T15:21:25,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:25,234 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T15:21:25,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:25,234 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/887cfd89e7094ea0a171ee44b0b05d02 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/887cfd89e7094ea0a171ee44b0b05d02 2024-11-20T15:21:25,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:25,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:25,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:25,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:25,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:25,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/dfa8e41a61da45b19d17777b70b430f5 is 50, key is test_row_2/A:col10/1732116084643/Put/seqid=0 2024-11-20T15:21:25,246 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/A of d2e9d0b7937c41ae63a82116ea698557 into 887cfd89e7094ea0a171ee44b0b05d02(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:21:25,247 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:25,247 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/A, priority=13, startTime=1732116085146; duration=0sec 2024-11-20T15:21:25,247 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:25,247 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:A 2024-11-20T15:21:25,250 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#C#compaction#69 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:25,251 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/c3cc71f3777e420fa537d04664780b00 is 50, key is test_row_0/C:col10/1732116084525/Put/seqid=0 2024-11-20T15:21:25,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:25,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:25,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741907_1083 (size=7415) 2024-11-20T15:21:25,265 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/dfa8e41a61da45b19d17777b70b430f5 2024-11-20T15:21:25,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741908_1084 (size=13051) 2024-11-20T15:21:25,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116145288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116145288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116145288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116145292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116145292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/40554cb0a1d24f039bc62b7108decca8 is 50, key is test_row_2/B:col10/1732116084643/Put/seqid=0 2024-11-20T15:21:25,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741909_1085 (size=7415) 2024-11-20T15:21:25,327 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/40554cb0a1d24f039bc62b7108decca8 2024-11-20T15:21:25,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/57aec85a79b74af39bb0502faacab1b2 is 50, key is test_row_2/C:col10/1732116084643/Put/seqid=0 2024-11-20T15:21:25,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741910_1086 (size=7415) 2024-11-20T15:21:25,360 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=334 
(bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/57aec85a79b74af39bb0502faacab1b2 2024-11-20T15:21:25,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/dfa8e41a61da45b19d17777b70b430f5 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/dfa8e41a61da45b19d17777b70b430f5 2024-11-20T15:21:25,378 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/dfa8e41a61da45b19d17777b70b430f5, entries=50, sequenceid=334, filesize=7.2 K 2024-11-20T15:21:25,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/40554cb0a1d24f039bc62b7108decca8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/40554cb0a1d24f039bc62b7108decca8 2024-11-20T15:21:25,385 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/40554cb0a1d24f039bc62b7108decca8, entries=50, sequenceid=334, filesize=7.2 K 2024-11-20T15:21:25,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/57aec85a79b74af39bb0502faacab1b2 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/57aec85a79b74af39bb0502faacab1b2 2024-11-20T15:21:25,395 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/57aec85a79b74af39bb0502faacab1b2, entries=50, sequenceid=334, filesize=7.2 K 2024-11-20T15:21:25,397 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for d2e9d0b7937c41ae63a82116ea698557 in 163ms, sequenceid=334, compaction requested=false 2024-11-20T15:21:25,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 
d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:25,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:25,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-20T15:21:25,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-20T15:21:25,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:25,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-11-20T15:21:25,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:25,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:25,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:25,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:25,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:25,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:25,403 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-20T15:21:25,403 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 473 msec 2024-11-20T15:21:25,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116145400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116145400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116145401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116145403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116145405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/5d08da8d6c694d15ab237707aa3804ff is 50, key is test_row_0/A:col10/1732116085397/Put/seqid=0 2024-11-20T15:21:25,411 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 483 msec 2024-11-20T15:21:25,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741911_1087 (size=12301) 2024-11-20T15:21:25,421 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/5d08da8d6c694d15ab237707aa3804ff 2024-11-20T15:21:25,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/220199c3b0da45d68555ef52d173d76a is 50, key is test_row_0/B:col10/1732116085397/Put/seqid=0 2024-11-20T15:21:25,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741912_1088 (size=12301) 2024-11-20T15:21:25,456 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=366 (bloomFilter=true), 
to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/220199c3b0da45d68555ef52d173d76a 2024-11-20T15:21:25,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/3a244233f0c540888313e1313017c2f3 is 50, key is test_row_0/C:col10/1732116085397/Put/seqid=0 2024-11-20T15:21:25,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741913_1089 (size=12301) 2024-11-20T15:21:25,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116145506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116145507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116145511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116145512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T15:21:25,529 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-20T15:21:25,531 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:25,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-20T15:21:25,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T15:21:25,534 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:25,534 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:25,535 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:25,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116145607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T15:21:25,687 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T15:21:25,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:25,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:25,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:25,688 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:25,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:25,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:25,704 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/c3cc71f3777e420fa537d04664780b00 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/c3cc71f3777e420fa537d04664780b00 2024-11-20T15:21:25,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116145710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,713 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/C of d2e9d0b7937c41ae63a82116ea698557 into c3cc71f3777e420fa537d04664780b00(size=12.7 K), total size for store is 20.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:25,713 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:25,714 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/C, priority=13, startTime=1732116085150; duration=0sec 2024-11-20T15:21:25,714 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:25,714 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:C 2024-11-20T15:21:25,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116145713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116145714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116145715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T15:21:25,842 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T15:21:25,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:25,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:25,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:25,844 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:25,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:25,891 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/3a244233f0c540888313e1313017c2f3 2024-11-20T15:21:25,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/5d08da8d6c694d15ab237707aa3804ff as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/5d08da8d6c694d15ab237707aa3804ff 2024-11-20T15:21:25,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/5d08da8d6c694d15ab237707aa3804ff, entries=150, sequenceid=366, filesize=12.0 K 2024-11-20T15:21:25,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/220199c3b0da45d68555ef52d173d76a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/220199c3b0da45d68555ef52d173d76a 2024-11-20T15:21:25,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:25,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116145910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,915 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/220199c3b0da45d68555ef52d173d76a, entries=150, sequenceid=366, filesize=12.0 K 2024-11-20T15:21:25,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/3a244233f0c540888313e1313017c2f3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/3a244233f0c540888313e1313017c2f3 2024-11-20T15:21:25,923 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/3a244233f0c540888313e1313017c2f3, entries=150, sequenceid=366, filesize=12.0 K 2024-11-20T15:21:25,924 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for d2e9d0b7937c41ae63a82116ea698557 in 524ms, sequenceid=366, compaction requested=true 2024-11-20T15:21:25,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:25,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:25,925 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:25,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:25,925 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:25,927 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 32767 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-11-20T15:21:25,927 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 32767 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:25,927 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/A is initiating minor compaction (all files) 2024-11-20T15:21:25,927 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/B is initiating minor compaction (all files) 2024-11-20T15:21:25,927 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/A in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:25,927 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/B in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:25,927 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/1225b51efe834f6fbf7c458626a90272, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/40554cb0a1d24f039bc62b7108decca8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/220199c3b0da45d68555ef52d173d76a] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=32.0 K 2024-11-20T15:21:25,927 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/887cfd89e7094ea0a171ee44b0b05d02, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/dfa8e41a61da45b19d17777b70b430f5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/5d08da8d6c694d15ab237707aa3804ff] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=32.0 K 2024-11-20T15:21:25,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:25,928 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 1225b51efe834f6fbf7c458626a90272, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732116084525 2024-11-20T15:21:25,928 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 887cfd89e7094ea0a171ee44b0b05d02, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, 
compression=NONE, seqNum=325, earliestPutTs=1732116084525 2024-11-20T15:21:25,929 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 40554cb0a1d24f039bc62b7108decca8, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732116084643 2024-11-20T15:21:25,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:25,929 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfa8e41a61da45b19d17777b70b430f5, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732116084643 2024-11-20T15:21:25,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:25,929 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 220199c3b0da45d68555ef52d173d76a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732116085395 2024-11-20T15:21:25,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:25,929 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d08da8d6c694d15ab237707aa3804ff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732116085395 2024-11-20T15:21:25,942 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#B#compaction#75 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:25,942 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/2e85432f6d8e4f20b6a0dd28f036153d is 50, key is test_row_0/B:col10/1732116085397/Put/seqid=0 2024-11-20T15:21:25,957 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#A#compaction#76 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:25,958 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/b1870f4fd2f243a08b6842674b20cce6 is 50, key is test_row_0/A:col10/1732116085397/Put/seqid=0 2024-11-20T15:21:25,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741914_1090 (size=13153) 2024-11-20T15:21:25,973 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/2e85432f6d8e4f20b6a0dd28f036153d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/2e85432f6d8e4f20b6a0dd28f036153d 2024-11-20T15:21:25,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741915_1091 (size=13153) 2024-11-20T15:21:25,984 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/B of d2e9d0b7937c41ae63a82116ea698557 into 2e85432f6d8e4f20b6a0dd28f036153d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:25,984 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:25,984 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/B, priority=13, startTime=1732116085925; duration=0sec 2024-11-20T15:21:25,984 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:25,985 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:B 2024-11-20T15:21:25,985 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:25,987 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 32767 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:25,987 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/C is initiating minor compaction (all files) 2024-11-20T15:21:25,987 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/C in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:25,987 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/c3cc71f3777e420fa537d04664780b00, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/57aec85a79b74af39bb0502faacab1b2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/3a244233f0c540888313e1313017c2f3] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=32.0 K 2024-11-20T15:21:25,988 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting c3cc71f3777e420fa537d04664780b00, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732116084525 2024-11-20T15:21:25,988 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 57aec85a79b74af39bb0502faacab1b2, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732116084643 2024-11-20T15:21:25,989 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a244233f0c540888313e1313017c2f3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732116085395 2024-11-20T15:21:25,997 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:25,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T15:21:25,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
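The selection entries above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" and "Exploring compaction algorithm has selected 3 files of size 32767 ... considering 1 permutations with 1 in ratio") come from ExploringCompactionPolicy. The standalone sketch below only approximates its "files in ratio" test for the three C-store files listed above (roughly 12.7 K, 7.2 K and 12.0 K); it is not the HBase source, and the 1.2 ratio is the documented default for hbase.hstore.compaction.ratio, assumed unchanged in this test run.

import java.util.List;

/**
 * Simplified illustration of the "files in ratio" rule behind the
 * "... 1 permutations with 1 in ratio" log lines above. This is NOT the HBase
 * source, just a standalone approximation: every file in the candidate window
 * must be no larger than ratio * (sum of the sizes of the other files).
 */
public class FilesInRatioSketch {

    static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes of the three C-store files from the log: 12.7 K, 7.2 K, 12.0 K.
        List<Long> sizes = List.of(13_003L, 7_375L, 12_289L);
        // Prints true: the single 3-file permutation is "in ratio", so all three
        // files are taken into the minor compaction, matching the log above.
        // 1.2 is the documented default for hbase.hstore.compaction.ratio (assumed here).
        System.out.println(filesInRatio(sizes, 1.2));
    }
}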
2024-11-20T15:21:25,998 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-11-20T15:21:25,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:25,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:25,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:25,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:25,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:25,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:26,001 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#C#compaction#77 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:26,003 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/d4017f8bcee447659047beb01bd28e34 is 50, key is test_row_0/C:col10/1732116085397/Put/seqid=0 2024-11-20T15:21:26,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/6868d659bbaf407189ed6eb3275d2316 is 50, key is test_row_1/A:col10/1732116085401/Put/seqid=0 2024-11-20T15:21:26,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741916_1092 (size=13153) 2024-11-20T15:21:26,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741917_1093 (size=9857) 2024-11-20T15:21:26,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:26,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:26,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116146089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116146089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116146093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116146093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T15:21:26,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116146196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116146197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116146198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116146198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,387 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/b1870f4fd2f243a08b6842674b20cce6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/b1870f4fd2f243a08b6842674b20cce6 2024-11-20T15:21:26,397 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/A of d2e9d0b7937c41ae63a82116ea698557 into b1870f4fd2f243a08b6842674b20cce6(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
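The RegionTooBusyException: Over memstore limit=512.0 K warnings repeated throughout this stretch are the region server rejecting writes (in HRegion.checkResources) while the region's memstore sits above its blocking threshold, until the in-flight flush drains it; clients are expected to back off and retry. As a rough illustration only, the snippet below shows how a 512 K threshold plausibly follows from the usual flush-size times block-multiplier arithmetic; the 128 KB flush size is an assumption about this test's configuration (stock defaults are 128 MB and a multiplier of 4), not something stated in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/**
 * Illustrative only: how the "Over memstore limit=512.0 K" threshold in the log
 * plausibly derives from standard settings. Writes to a region are rejected with
 * RegionTooBusyException while its memstore exceeds flush size * block multiplier.
 * The 128 KB flush size below is an ASSUMPTION about this test run (defaults are
 * 128 MB and 4); it is not read from the log.
 */
public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Real config keys, hypothetical test-sized values.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setLong("hbase.hregion.memstore.block.multiplier", 4L);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);

        // 128 KB * 4 = 524288 bytes, i.e. the 512.0 K limit reported above.
        System.out.println("blocking limit = " + (flushSize * multiplier) + " bytes");
    }
}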
2024-11-20T15:21:26,398 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:26,398 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/A, priority=13, startTime=1732116085925; duration=0sec 2024-11-20T15:21:26,398 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:26,398 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:A 2024-11-20T15:21:26,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116146400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116146401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116146402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116146402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116146416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,430 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/d4017f8bcee447659047beb01bd28e34 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d4017f8bcee447659047beb01bd28e34 2024-11-20T15:21:26,437 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/C of d2e9d0b7937c41ae63a82116ea698557 into d4017f8bcee447659047beb01bd28e34(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:21:26,438 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:26,438 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/C, priority=13, startTime=1732116085929; duration=0sec 2024-11-20T15:21:26,438 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:26,438 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:C 2024-11-20T15:21:26,439 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/6868d659bbaf407189ed6eb3275d2316 2024-11-20T15:21:26,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/bdcb08f508f847dba8efdc97f1d8cdf4 is 50, key is test_row_1/B:col10/1732116085401/Put/seqid=0 2024-11-20T15:21:26,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741918_1094 (size=9857) 2024-11-20T15:21:26,466 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/bdcb08f508f847dba8efdc97f1d8cdf4 2024-11-20T15:21:26,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/d85d4fc7b2714f548188dc3fdfe617ee is 50, key is test_row_1/C:col10/1732116085401/Put/seqid=0 2024-11-20T15:21:26,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741919_1095 (size=9857) 2024-11-20T15:21:26,488 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/d85d4fc7b2714f548188dc3fdfe617ee 2024-11-20T15:21:26,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/6868d659bbaf407189ed6eb3275d2316 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6868d659bbaf407189ed6eb3275d2316 2024-11-20T15:21:26,505 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6868d659bbaf407189ed6eb3275d2316, entries=100, sequenceid=373, filesize=9.6 K 2024-11-20T15:21:26,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/bdcb08f508f847dba8efdc97f1d8cdf4 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bdcb08f508f847dba8efdc97f1d8cdf4 2024-11-20T15:21:26,515 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bdcb08f508f847dba8efdc97f1d8cdf4, entries=100, sequenceid=373, filesize=9.6 K 2024-11-20T15:21:26,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/d85d4fc7b2714f548188dc3fdfe617ee as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d85d4fc7b2714f548188dc3fdfe617ee 2024-11-20T15:21:26,523 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d85d4fc7b2714f548188dc3fdfe617ee, entries=100, sequenceid=373, filesize=9.6 K 2024-11-20T15:21:26,525 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=194.56 KB/199230 for d2e9d0b7937c41ae63a82116ea698557 in 526ms, sequenceid=373, compaction requested=false 2024-11-20T15:21:26,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:26,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:26,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-20T15:21:26,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-20T15:21:26,528 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-20T15:21:26,528 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 991 msec 2024-11-20T15:21:26,530 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 998 msec 2024-11-20T15:21:26,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T15:21:26,638 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-20T15:21:26,640 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:26,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-20T15:21:26,642 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:26,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T15:21:26,643 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:26,643 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:26,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:26,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=207.98 KB heapSize=545.67 KB 2024-11-20T15:21:26,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:26,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116146707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116146708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:26,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:26,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:26,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:26,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:26,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116146712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116146712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,720 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/79e2674b5b7043f48e3219b64aa0b9e3 is 50, key is test_row_0/A:col10/1732116086709/Put/seqid=0 2024-11-20T15:21:26,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741920_1096 (size=12301) 2024-11-20T15:21:26,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T15:21:26,794 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,795 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T15:21:26,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:26,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:26,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:26,795 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:26,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:26,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:26,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116146816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:26,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116146816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T15:21:26,948 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:26,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T15:21:26,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:26,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:26,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:26,949 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:26,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:26,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:27,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:27,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116147019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:27,020 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:27,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116147019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:27,103 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:27,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T15:21:27,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:27,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:27,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:27,104 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:27,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:27,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:27,131 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=69.33 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/79e2674b5b7043f48e3219b64aa0b9e3 2024-11-20T15:21:27,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/b27b87b1df364f34aae45237ab49e0f6 is 50, key is test_row_0/B:col10/1732116086709/Put/seqid=0 2024-11-20T15:21:27,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741921_1097 (size=12301) 2024-11-20T15:21:27,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:27,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116147214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:27,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:27,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116147217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:27,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T15:21:27,256 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:27,257 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T15:21:27,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:27,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:27,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:27,258 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:27,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:27,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:27,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:27,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116147321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:27,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:27,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116147323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:27,412 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:27,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T15:21:27,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:27,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:27,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:27,413 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:27,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:27,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:27,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:27,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116147420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:27,553 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=69.33 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/b27b87b1df364f34aae45237ab49e0f6 2024-11-20T15:21:27,566 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:27,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T15:21:27,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:27,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
as already flushing 2024-11-20T15:21:27,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:27,567 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:27,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:27,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:27,571 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/4e23b2a12d5d4f37974261b0bd29f2c1 is 50, key is test_row_0/C:col10/1732116086709/Put/seqid=0 2024-11-20T15:21:27,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741922_1098 (size=12301) 2024-11-20T15:21:27,599 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=69.33 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/4e23b2a12d5d4f37974261b0bd29f2c1 2024-11-20T15:21:27,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/79e2674b5b7043f48e3219b64aa0b9e3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/79e2674b5b7043f48e3219b64aa0b9e3 2024-11-20T15:21:27,617 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/79e2674b5b7043f48e3219b64aa0b9e3, entries=150, sequenceid=409, filesize=12.0 K 2024-11-20T15:21:27,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/b27b87b1df364f34aae45237ab49e0f6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b27b87b1df364f34aae45237ab49e0f6 2024-11-20T15:21:27,627 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b27b87b1df364f34aae45237ab49e0f6, entries=150, sequenceid=409, filesize=12.0 K 2024-11-20T15:21:27,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/4e23b2a12d5d4f37974261b0bd29f2c1 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/4e23b2a12d5d4f37974261b0bd29f2c1 2024-11-20T15:21:27,636 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/4e23b2a12d5d4f37974261b0bd29f2c1, entries=150, sequenceid=409, filesize=12.0 K 2024-11-20T15:21:27,638 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~207.98 KB/212970, heapSize ~545.63 KB/558720, currentSize=0 B/0 for d2e9d0b7937c41ae63a82116ea698557 in 927ms, sequenceid=409, compaction requested=true 2024-11-20T15:21:27,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:27,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:27,638 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:27,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:27,638 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:27,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:27,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:27,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:27,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:27,640 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35311 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:27,640 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/A is initiating minor compaction (all files) 2024-11-20T15:21:27,640 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/A in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:27,640 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35311 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:27,640 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/b1870f4fd2f243a08b6842674b20cce6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6868d659bbaf407189ed6eb3275d2316, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/79e2674b5b7043f48e3219b64aa0b9e3] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=34.5 K 2024-11-20T15:21:27,640 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/B is initiating minor compaction (all files) 2024-11-20T15:21:27,640 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/B in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:27,641 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/2e85432f6d8e4f20b6a0dd28f036153d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bdcb08f508f847dba8efdc97f1d8cdf4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b27b87b1df364f34aae45237ab49e0f6] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=34.5 K 2024-11-20T15:21:27,641 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e85432f6d8e4f20b6a0dd28f036153d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732116085395 2024-11-20T15:21:27,641 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1870f4fd2f243a08b6842674b20cce6, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732116085395 2024-11-20T15:21:27,642 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting bdcb08f508f847dba8efdc97f1d8cdf4, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732116085401 2024-11-20T15:21:27,642 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6868d659bbaf407189ed6eb3275d2316, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732116085401 2024-11-20T15:21:27,643 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b27b87b1df364f34aae45237ab49e0f6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732116086085 2024-11-20T15:21:27,643 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79e2674b5b7043f48e3219b64aa0b9e3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732116086085 2024-11-20T15:21:27,662 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#B#compaction#84 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:27,663 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/bf7f44416ba946edaf9fe775d2e96474 is 50, key is test_row_0/B:col10/1732116086709/Put/seqid=0 2024-11-20T15:21:27,667 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#A#compaction#85 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:27,668 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/d204d0853a844235b922639b40e8f87f is 50, key is test_row_0/A:col10/1732116086709/Put/seqid=0 2024-11-20T15:21:27,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741923_1099 (size=13255) 2024-11-20T15:21:27,681 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/bf7f44416ba946edaf9fe775d2e96474 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bf7f44416ba946edaf9fe775d2e96474 2024-11-20T15:21:27,689 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/B of d2e9d0b7937c41ae63a82116ea698557 into bf7f44416ba946edaf9fe775d2e96474(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:27,689 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:27,689 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/B, priority=13, startTime=1732116087638; duration=0sec 2024-11-20T15:21:27,689 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:27,689 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:B 2024-11-20T15:21:27,689 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:27,691 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35311 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:27,692 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/C is initiating minor compaction (all files) 2024-11-20T15:21:27,692 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/C in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:27,692 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d4017f8bcee447659047beb01bd28e34, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d85d4fc7b2714f548188dc3fdfe617ee, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/4e23b2a12d5d4f37974261b0bd29f2c1] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=34.5 K 2024-11-20T15:21:27,692 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting d4017f8bcee447659047beb01bd28e34, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732116085395 2024-11-20T15:21:27,693 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting d85d4fc7b2714f548188dc3fdfe617ee, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732116085401 2024-11-20T15:21:27,693 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e23b2a12d5d4f37974261b0bd29f2c1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732116086085 2024-11-20T15:21:27,706 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#C#compaction#86 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:27,707 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/292551e2000144ffae4eb6a04e5b0185 is 50, key is test_row_0/C:col10/1732116086709/Put/seqid=0 2024-11-20T15:21:27,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741924_1100 (size=13255) 2024-11-20T15:21:27,720 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:27,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T15:21:27,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:27,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:27,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:27,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-20T15:21:27,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-20T15:21:27,726 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-20T15:21:27,726 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0810 sec 2024-11-20T15:21:27,729 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.0870 sec 2024-11-20T15:21:27,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741925_1101 (size=13255) 2024-11-20T15:21:27,742 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/292551e2000144ffae4eb6a04e5b0185 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/292551e2000144ffae4eb6a04e5b0185 2024-11-20T15:21:27,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T15:21:27,747 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-20T15:21:27,750 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/C of d2e9d0b7937c41ae63a82116ea698557 into 292551e2000144ffae4eb6a04e5b0185(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:21:27,750 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:27,750 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/C, priority=13, startTime=1732116087639; duration=0sec 2024-11-20T15:21:27,750 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:27,750 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:27,750 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:C 2024-11-20T15:21:27,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-20T15:21:27,752 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:27,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T15:21:27,753 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:27,753 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:27,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T15:21:27,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:27,864 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T15:21:27,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:27,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:27,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:27,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:27,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:27,865 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:27,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/640b7a91cfee4f89af8fe226c824ee9f is 50, key is test_row_0/A:col10/1732116087836/Put/seqid=0 2024-11-20T15:21:27,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741926_1102 (size=17181) 2024-11-20T15:21:27,895 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=424 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/640b7a91cfee4f89af8fe226c824ee9f 2024-11-20T15:21:27,905 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:27,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T15:21:27,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:27,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:27,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:27,907 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:27,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:27,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:27,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/1b68ec9d942a446d88c25a3b161b59c5 is 50, key is test_row_0/B:col10/1732116087836/Put/seqid=0 2024-11-20T15:21:27,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:27,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116147924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:27,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:27,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116147924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:27,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741927_1103 (size=12301) 2024-11-20T15:21:27,945 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=424 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/1b68ec9d942a446d88c25a3b161b59c5 2024-11-20T15:21:27,961 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/f38f4c3eb2764360b5ddbff670aa4458 is 50, key is test_row_0/C:col10/1732116087836/Put/seqid=0 2024-11-20T15:21:27,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741928_1104 (size=12301) 2024-11-20T15:21:27,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=424 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/f38f4c3eb2764360b5ddbff670aa4458 2024-11-20T15:21:27,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/640b7a91cfee4f89af8fe226c824ee9f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/640b7a91cfee4f89af8fe226c824ee9f 2024-11-20T15:21:27,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/640b7a91cfee4f89af8fe226c824ee9f, entries=250, sequenceid=424, filesize=16.8 K 2024-11-20T15:21:28,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/1b68ec9d942a446d88c25a3b161b59c5 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/1b68ec9d942a446d88c25a3b161b59c5 2024-11-20T15:21:28,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/1b68ec9d942a446d88c25a3b161b59c5, entries=150, sequenceid=424, filesize=12.0 K 2024-11-20T15:21:28,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/f38f4c3eb2764360b5ddbff670aa4458 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f38f4c3eb2764360b5ddbff670aa4458 2024-11-20T15:21:28,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f38f4c3eb2764360b5ddbff670aa4458, entries=150, sequenceid=424, filesize=12.0 K 2024-11-20T15:21:28,026 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for d2e9d0b7937c41ae63a82116ea698557 in 162ms, sequenceid=424, compaction requested=false 2024-11-20T15:21:28,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:28,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:28,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T15:21:28,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:28,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:28,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:28,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:28,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:28,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:28,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/db0e03ca9c7449698135b21722c7f585 is 50, key is test_row_0/A:col10/1732116087913/Put/seqid=0 2024-11-20T15:21:28,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:28,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116148042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:28,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116148046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T15:21:28,059 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T15:21:28,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:28,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:28,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:28,060 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:28,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:28,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:28,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741929_1105 (size=14741) 2024-11-20T15:21:28,071 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/db0e03ca9c7449698135b21722c7f585 2024-11-20T15:21:28,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/200522c011454e638a8d10fe3b17899b is 50, key is test_row_0/B:col10/1732116087913/Put/seqid=0 2024-11-20T15:21:28,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741930_1106 (size=12301) 2024-11-20T15:21:28,097 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/200522c011454e638a8d10fe3b17899b 2024-11-20T15:21:28,119 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/15306d9a83c64cacb1cf8dd7c2c70ccd is 50, key is test_row_0/C:col10/1732116087913/Put/seqid=0 2024-11-20T15:21:28,122 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/d204d0853a844235b922639b40e8f87f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d204d0853a844235b922639b40e8f87f 2024-11-20T15:21:28,132 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/A of d2e9d0b7937c41ae63a82116ea698557 into d204d0853a844235b922639b40e8f87f(size=12.9 K), total size for store is 29.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:21:28,132 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:28,132 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/A, priority=13, startTime=1732116087638; duration=0sec 2024-11-20T15:21:28,132 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:28,132 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:A 2024-11-20T15:21:28,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:28,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116148149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:28,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116148149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741931_1107 (size=12301) 2024-11-20T15:21:28,213 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T15:21:28,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:28,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:28,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:28,214 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:28,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:28,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:28,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:28,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116148221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:28,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116148222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:28,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116148353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:28,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116148353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T15:21:28,366 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T15:21:28,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:28,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:28,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:28,367 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:28,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:28,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:28,519 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T15:21:28,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:28,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:28,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:28,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:28,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:28,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:28,555 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/15306d9a83c64cacb1cf8dd7c2c70ccd 2024-11-20T15:21:28,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/db0e03ca9c7449698135b21722c7f585 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/db0e03ca9c7449698135b21722c7f585 2024-11-20T15:21:28,569 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/db0e03ca9c7449698135b21722c7f585, entries=200, sequenceid=451, filesize=14.4 K 2024-11-20T15:21:28,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/200522c011454e638a8d10fe3b17899b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/200522c011454e638a8d10fe3b17899b 2024-11-20T15:21:28,576 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/200522c011454e638a8d10fe3b17899b, entries=150, sequenceid=451, filesize=12.0 K 2024-11-20T15:21:28,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/15306d9a83c64cacb1cf8dd7c2c70ccd as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/15306d9a83c64cacb1cf8dd7c2c70ccd 2024-11-20T15:21:28,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/15306d9a83c64cacb1cf8dd7c2c70ccd, entries=150, sequenceid=451, filesize=12.0 K 2024-11-20T15:21:28,587 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for d2e9d0b7937c41ae63a82116ea698557 in 558ms, sequenceid=451, compaction requested=true 2024-11-20T15:21:28,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:28,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:28,587 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-11-20T15:21:28,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:28,588 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:28,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:28,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:28,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:28,589 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 45177 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:28,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:28,589 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/A is initiating minor compaction (all files) 2024-11-20T15:21:28,589 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/A in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:28,589 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d204d0853a844235b922639b40e8f87f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/640b7a91cfee4f89af8fe226c824ee9f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/db0e03ca9c7449698135b21722c7f585] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=44.1 K 2024-11-20T15:21:28,590 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:28,590 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/B is initiating minor compaction (all files) 2024-11-20T15:21:28,590 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/B in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:28,590 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bf7f44416ba946edaf9fe775d2e96474, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/1b68ec9d942a446d88c25a3b161b59c5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/200522c011454e638a8d10fe3b17899b] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=37.0 K 2024-11-20T15:21:28,590 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d204d0853a844235b922639b40e8f87f, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732116086085 2024-11-20T15:21:28,591 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting bf7f44416ba946edaf9fe775d2e96474, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732116086085 2024-11-20T15:21:28,591 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 640b7a91cfee4f89af8fe226c824ee9f, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=424, earliestPutTs=1732116087831 2024-11-20T15:21:28,591 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b68ec9d942a446d88c25a3b161b59c5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=424, earliestPutTs=1732116087831 2024-11-20T15:21:28,592 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting db0e03ca9c7449698135b21722c7f585, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732116087913 2024-11-20T15:21:28,592 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 200522c011454e638a8d10fe3b17899b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732116087913 2024-11-20T15:21:28,620 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#A#compaction#93 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:28,621 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/91b74f6ec1db420bb18544435b81c1d3 is 50, key is test_row_0/A:col10/1732116087913/Put/seqid=0 2024-11-20T15:21:28,633 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#B#compaction#94 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:28,633 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/f4482da00d3b4dd1b03c4c5cc8718552 is 50, key is test_row_0/B:col10/1732116087913/Put/seqid=0 2024-11-20T15:21:28,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741932_1108 (size=13357) 2024-11-20T15:21:28,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741933_1109 (size=13357) 2024-11-20T15:21:28,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:28,664 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T15:21:28,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:28,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:28,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:28,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:28,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:28,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:28,669 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/f4482da00d3b4dd1b03c4c5cc8718552 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/f4482da00d3b4dd1b03c4c5cc8718552 2024-11-20T15:21:28,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/0dea17b97894488bb70f61efcba42f50 is 50, key is test_row_0/A:col10/1732116088656/Put/seqid=0 2024-11-20T15:21:28,673 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,674 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T15:21:28,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:28,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:28,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:28,674 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:28,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:28,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:28,679 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/B of d2e9d0b7937c41ae63a82116ea698557 into f4482da00d3b4dd1b03c4c5cc8718552(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:21:28,679 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:28,679 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/B, priority=13, startTime=1732116088588; duration=0sec 2024-11-20T15:21:28,680 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:28,680 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:B 2024-11-20T15:21:28,680 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:28,681 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:28,681 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/C is initiating minor compaction (all files) 2024-11-20T15:21:28,681 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/C in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:28,681 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/292551e2000144ffae4eb6a04e5b0185, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f38f4c3eb2764360b5ddbff670aa4458, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/15306d9a83c64cacb1cf8dd7c2c70ccd] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=37.0 K 2024-11-20T15:21:28,682 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 292551e2000144ffae4eb6a04e5b0185, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732116086085 2024-11-20T15:21:28,683 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting f38f4c3eb2764360b5ddbff670aa4458, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=424, earliestPutTs=1732116087831 2024-11-20T15:21:28,683 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 15306d9a83c64cacb1cf8dd7c2c70ccd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732116087913 2024-11-20T15:21:28,708 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d2e9d0b7937c41ae63a82116ea698557#C#compaction#96 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:28,709 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/17d467cdc3514ad4b3f0aa1dd4063eae is 50, key is test_row_0/C:col10/1732116087913/Put/seqid=0 2024-11-20T15:21:28,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741934_1110 (size=12301) 2024-11-20T15:21:28,714 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=464 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/0dea17b97894488bb70f61efcba42f50 2024-11-20T15:21:28,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:28,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116148717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:28,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116148718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,729 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/ad642adb76fa49b486eeb9a10310dbb7 is 50, key is test_row_0/B:col10/1732116088656/Put/seqid=0 2024-11-20T15:21:28,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741935_1111 (size=13357) 2024-11-20T15:21:28,741 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/17d467cdc3514ad4b3f0aa1dd4063eae as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/17d467cdc3514ad4b3f0aa1dd4063eae 2024-11-20T15:21:28,748 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/C of d2e9d0b7937c41ae63a82116ea698557 into 17d467cdc3514ad4b3f0aa1dd4063eae(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:21:28,748 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:28,748 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/C, priority=13, startTime=1732116088589; duration=0sec 2024-11-20T15:21:28,749 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:28,749 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:C 2024-11-20T15:21:28,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741936_1112 (size=12301) 2024-11-20T15:21:28,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:28,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116148820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:28,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116148822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,828 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T15:21:28,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:28,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:28,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:28,828 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:28,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:28,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:28,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T15:21:28,981 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:28,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T15:21:28,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:28,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:28,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:28,982 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:28,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:28,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:29,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:29,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116149025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:29,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:29,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116149025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:29,069 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/91b74f6ec1db420bb18544435b81c1d3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/91b74f6ec1db420bb18544435b81c1d3 2024-11-20T15:21:29,078 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/A of d2e9d0b7937c41ae63a82116ea698557 into 91b74f6ec1db420bb18544435b81c1d3(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:29,078 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:29,078 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/A, priority=13, startTime=1732116088587; duration=0sec 2024-11-20T15:21:29,078 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:29,078 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:A 2024-11-20T15:21:29,135 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:29,135 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T15:21:29,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:29,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:29,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:29,136 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:29,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:29,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:29,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=464 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/ad642adb76fa49b486eeb9a10310dbb7 2024-11-20T15:21:29,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/1027c627e14b466e8812777448f855b9 is 50, key is test_row_0/C:col10/1732116088656/Put/seqid=0 2024-11-20T15:21:29,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741937_1113 (size=12301) 2024-11-20T15:21:29,180 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=464 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/1027c627e14b466e8812777448f855b9 2024-11-20T15:21:29,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/0dea17b97894488bb70f61efcba42f50 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/0dea17b97894488bb70f61efcba42f50 2024-11-20T15:21:29,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/0dea17b97894488bb70f61efcba42f50, entries=150, sequenceid=464, filesize=12.0 K 2024-11-20T15:21:29,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/ad642adb76fa49b486eeb9a10310dbb7 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/ad642adb76fa49b486eeb9a10310dbb7 2024-11-20T15:21:29,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/ad642adb76fa49b486eeb9a10310dbb7, entries=150, sequenceid=464, filesize=12.0 K 2024-11-20T15:21:29,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/1027c627e14b466e8812777448f855b9 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/1027c627e14b466e8812777448f855b9 2024-11-20T15:21:29,219 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/1027c627e14b466e8812777448f855b9, entries=150, sequenceid=464, filesize=12.0 K 2024-11-20T15:21:29,220 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for d2e9d0b7937c41ae63a82116ea698557 in 557ms, sequenceid=464, compaction requested=false 2024-11-20T15:21:29,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:29,288 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:29,289 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T15:21:29,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:29,289 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T15:21:29,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:29,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:29,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:29,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:29,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:29,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:29,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/e5419f1f87e94c508e83b226729bd5a0 is 50, key is test_row_0/A:col10/1732116088710/Put/seqid=0 2024-11-20T15:21:29,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741938_1114 (size=12301) 2024-11-20T15:21:29,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 
d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:29,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:29,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:29,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116149340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:29,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:29,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116149341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:29,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:29,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116149441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:29,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:29,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116149443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:29,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:29,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116149443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:29,444 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4152 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., hostname=0b62285ead89,33387,1732116069954, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:21:29,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:29,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116149645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:29,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:29,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116149645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:29,704 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=492 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/e5419f1f87e94c508e83b226729bd5a0 2024-11-20T15:21:29,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/0a597d3fb3b34685a4dfa4a9c79abfdc is 50, key is test_row_0/B:col10/1732116088710/Put/seqid=0 2024-11-20T15:21:29,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741939_1115 (size=12301) 2024-11-20T15:21:29,725 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=492 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/0a597d3fb3b34685a4dfa4a9c79abfdc 2024-11-20T15:21:29,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/5eb0791192f848e5afe55c4eaafe08da is 50, key is test_row_0/C:col10/1732116088710/Put/seqid=0 2024-11-20T15:21:29,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741940_1116 (size=12301) 2024-11-20T15:21:29,755 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=492 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/5eb0791192f848e5afe55c4eaafe08da 2024-11-20T15:21:29,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/e5419f1f87e94c508e83b226729bd5a0 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e5419f1f87e94c508e83b226729bd5a0
2024-11-20T15:21:29,774 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e5419f1f87e94c508e83b226729bd5a0, entries=150, sequenceid=492, filesize=12.0 K
2024-11-20T15:21:29,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/0a597d3fb3b34685a4dfa4a9c79abfdc as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/0a597d3fb3b34685a4dfa4a9c79abfdc
2024-11-20T15:21:29,787 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/0a597d3fb3b34685a4dfa4a9c79abfdc, entries=150, sequenceid=492, filesize=12.0 K
2024-11-20T15:21:29,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/5eb0791192f848e5afe55c4eaafe08da as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/5eb0791192f848e5afe55c4eaafe08da
2024-11-20T15:21:29,796 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/5eb0791192f848e5afe55c4eaafe08da, entries=150, sequenceid=492, filesize=12.0 K
2024-11-20T15:21:29,797 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d2e9d0b7937c41ae63a82116ea698557 in 508ms, sequenceid=492, compaction requested=true
2024-11-20T15:21:29,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557:
2024-11-20T15:21:29,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.
2024-11-20T15:21:29,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31
2024-11-20T15:21:29,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=31
2024-11-20T15:21:29,800 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30
2024-11-20T15:21:29,801 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0450 sec
2024-11-20T15:21:29,802 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 2.0510 sec
2024-11-20T15:21:29,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30
2024-11-20T15:21:29,857 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed
2024-11-20T15:21:29,859 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T15:21:29,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees
2024-11-20T15:21:29,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32
2024-11-20T15:21:29,861 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T15:21:29,862 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T15:21:29,862 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T15:21:29,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557
2024-11-20T15:21:29,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-11-20T15:21:29,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A
2024-11-20T15:21:29,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:21:29,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B
2024-11-20T15:21:29,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:21:29,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:29,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:29,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/4bf478b88efd4d41a1db36adc2474757 is 50, key is test_row_0/A:col10/1732116089340/Put/seqid=0 2024-11-20T15:21:29,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741941_1117 (size=12301) 2024-11-20T15:21:29,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T15:21:29,965 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=504 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/4bf478b88efd4d41a1db36adc2474757 2024-11-20T15:21:29,977 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/eab2325abfc144029b319c246850a14b is 50, key is test_row_0/B:col10/1732116089340/Put/seqid=0 2024-11-20T15:21:29,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:29,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116149993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:29,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:29,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116149995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:30,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741942_1118 (size=12301) 2024-11-20T15:21:30,014 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:30,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T15:21:30,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:30,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:30,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:30,015 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:30,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:30,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:30,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:30,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116150097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:30,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:30,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116150099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:30,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T15:21:30,168 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:30,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T15:21:30,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:30,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:30,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:30,168 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:30,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:30,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:30,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:30,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116150228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:30,230 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4137 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., hostname=0b62285ead89,33387,1732116069954, seqNum=2, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:21:30,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:30,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116150239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:30,240 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., hostname=0b62285ead89,33387,1732116069954, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:21:30,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:30,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116150300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:30,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:30,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116150302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:30,321 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:30,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T15:21:30,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:30,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:30,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:30,322 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:30,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:30,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:30,404 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=504 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/eab2325abfc144029b319c246850a14b 2024-11-20T15:21:30,414 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/48dd268d76c24f96879a703cccc318f2 is 50, key is test_row_0/C:col10/1732116089340/Put/seqid=0 2024-11-20T15:21:30,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741943_1119 (size=12301) 2024-11-20T15:21:30,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T15:21:30,474 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:30,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T15:21:30,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:30,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:30,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:30,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:30,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:30,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:30,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:30,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116150605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:30,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116150607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:30,627 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:30,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T15:21:30,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:30,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:30,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:30,628 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:30,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:30,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:30,781 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:30,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T15:21:30,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:30,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:30,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:30,782 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:30,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:30,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:30,820 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=504 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/48dd268d76c24f96879a703cccc318f2 2024-11-20T15:21:30,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/4bf478b88efd4d41a1db36adc2474757 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/4bf478b88efd4d41a1db36adc2474757 2024-11-20T15:21:30,832 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/4bf478b88efd4d41a1db36adc2474757, entries=150, sequenceid=504, filesize=12.0 K 2024-11-20T15:21:30,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/eab2325abfc144029b319c246850a14b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/eab2325abfc144029b319c246850a14b 2024-11-20T15:21:30,837 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/eab2325abfc144029b319c246850a14b, entries=150, sequenceid=504, filesize=12.0 K 2024-11-20T15:21:30,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/48dd268d76c24f96879a703cccc318f2 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/48dd268d76c24f96879a703cccc318f2 2024-11-20T15:21:30,843 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/48dd268d76c24f96879a703cccc318f2, entries=150, sequenceid=504, filesize=12.0 K 2024-11-20T15:21:30,844 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d2e9d0b7937c41ae63a82116ea698557 in 895ms, sequenceid=504, compaction requested=true 2024-11-20T15:21:30,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:30,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:30,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:30,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:30,845 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:21:30,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:30,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:30,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:30,845 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:21:30,847 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50260 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:21:30,847 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/A is initiating minor compaction (all files) 2024-11-20T15:21:30,847 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/A in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:30,847 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/91b74f6ec1db420bb18544435b81c1d3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/0dea17b97894488bb70f61efcba42f50, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e5419f1f87e94c508e83b226729bd5a0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/4bf478b88efd4d41a1db36adc2474757] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=49.1 K 2024-11-20T15:21:30,847 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50260 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:21:30,847 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/B is initiating minor compaction (all files) 2024-11-20T15:21:30,848 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/B in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:30,848 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/f4482da00d3b4dd1b03c4c5cc8718552, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/ad642adb76fa49b486eeb9a10310dbb7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/0a597d3fb3b34685a4dfa4a9c79abfdc, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/eab2325abfc144029b319c246850a14b] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=49.1 K 2024-11-20T15:21:30,848 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91b74f6ec1db420bb18544435b81c1d3, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732116087913 2024-11-20T15:21:30,848 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0dea17b97894488bb70f61efcba42f50, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=464, earliestPutTs=1732116088656 2024-11-20T15:21:30,848 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting f4482da00d3b4dd1b03c4c5cc8718552, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=451, 
earliestPutTs=1732116087913 2024-11-20T15:21:30,849 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5419f1f87e94c508e83b226729bd5a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=492, earliestPutTs=1732116088694 2024-11-20T15:21:30,849 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting ad642adb76fa49b486eeb9a10310dbb7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=464, earliestPutTs=1732116088656 2024-11-20T15:21:30,849 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4bf478b88efd4d41a1db36adc2474757, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=504, earliestPutTs=1732116089339 2024-11-20T15:21:30,850 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a597d3fb3b34685a4dfa4a9c79abfdc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=492, earliestPutTs=1732116088694 2024-11-20T15:21:30,850 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting eab2325abfc144029b319c246850a14b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=504, earliestPutTs=1732116089339 2024-11-20T15:21:30,861 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#A#compaction#105 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:30,862 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/8c10126e56aa49c2bc527e7485d67eb7 is 50, key is test_row_0/A:col10/1732116089340/Put/seqid=0 2024-11-20T15:21:30,863 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#B#compaction#106 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:30,864 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/2e78e5b494414afbb5c9d79d299c50eb is 50, key is test_row_0/B:col10/1732116089340/Put/seqid=0 2024-11-20T15:21:30,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741944_1120 (size=13493) 2024-11-20T15:21:30,879 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/2e78e5b494414afbb5c9d79d299c50eb as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/2e78e5b494414afbb5c9d79d299c50eb 2024-11-20T15:21:30,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741945_1121 (size=13493) 2024-11-20T15:21:30,887 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/B of d2e9d0b7937c41ae63a82116ea698557 into 2e78e5b494414afbb5c9d79d299c50eb(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:30,887 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:30,887 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/B, priority=12, startTime=1732116090845; duration=0sec 2024-11-20T15:21:30,887 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:30,887 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:B 2024-11-20T15:21:30,887 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:21:30,889 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50260 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:21:30,889 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/C is initiating minor compaction (all files) 2024-11-20T15:21:30,889 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/C in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:30,889 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/17d467cdc3514ad4b3f0aa1dd4063eae, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/1027c627e14b466e8812777448f855b9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/5eb0791192f848e5afe55c4eaafe08da, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/48dd268d76c24f96879a703cccc318f2] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=49.1 K 2024-11-20T15:21:30,890 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 17d467cdc3514ad4b3f0aa1dd4063eae, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732116087913 2024-11-20T15:21:30,890 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 1027c627e14b466e8812777448f855b9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=464, earliestPutTs=1732116088656 2024-11-20T15:21:30,890 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 5eb0791192f848e5afe55c4eaafe08da, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=492, earliestPutTs=1732116088694 2024-11-20T15:21:30,891 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 48dd268d76c24f96879a703cccc318f2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=504, earliestPutTs=1732116089339 2024-11-20T15:21:30,902 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#C#compaction#107 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:30,903 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/319ccaa728754e439960eefe0febc211 is 50, key is test_row_0/C:col10/1732116089340/Put/seqid=0 2024-11-20T15:21:30,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741946_1122 (size=13493) 2024-11-20T15:21:30,934 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:30,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T15:21:30,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:30,935 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T15:21:30,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:30,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:30,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:30,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:30,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:30,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:30,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/e39014f940ea4fd19f43300135105112 is 50, key is test_row_0/A:col10/1732116089985/Put/seqid=0 2024-11-20T15:21:30,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741947_1123 (size=12301) 2024-11-20T15:21:30,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 
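
The pid=32/pid=33 entries above are a master-driven table flush: FlushTableProcedure fans out a FlushRegionCallable to the region server, which flushes all three column families of the region. A flush like this can be requested from a client roughly as sketched below; the table name comes from the log, while the configuration source and surrounding scaffolding are illustrative assumptions.

// Hedged sketch: requesting the table flush that produces the
// FlushTableProcedure / FlushRegionCallable entries in this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Submits a flush procedure on the master; each region of the table
            // then runs a flush like the pid=33 entries above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
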
2024-11-20T15:21:31,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:31,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:31,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:31,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116151123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:31,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:31,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116151123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:31,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:31,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116151225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:31,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:31,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116151225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:31,293 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/8c10126e56aa49c2bc527e7485d67eb7 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/8c10126e56aa49c2bc527e7485d67eb7 2024-11-20T15:21:31,299 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/A of d2e9d0b7937c41ae63a82116ea698557 into 8c10126e56aa49c2bc527e7485d67eb7(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
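
The repeated WARN/RegionTooBusyException entries above show writes being rejected while the region's memstore is over its blocking limit (512.0 K in this test) until the in-flight flush drains it. In practice the HBase client library retries these internally; the explicit backoff loop below is only an illustrative sketch of that behaviour. The row, family, and qualifier are taken from the log's keys, while the value and retry parameters are assumptions.

// Hedged sketch of client-side backoff on RegionTooBusyException
// ("Over memstore limit"). Illustrative only; the HBase client normally
// handles this retry itself.
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
    static void putWithBackoff(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    // Memstore above its blocking limit: wait for the flush to
                    // drain it, then retry with exponential backoff.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}
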
2024-11-20T15:21:31,299 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:31,299 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/A, priority=12, startTime=1732116090845; duration=0sec 2024-11-20T15:21:31,299 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:31,299 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:A 2024-11-20T15:21:31,318 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/319ccaa728754e439960eefe0febc211 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/319ccaa728754e439960eefe0febc211 2024-11-20T15:21:31,324 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/C of d2e9d0b7937c41ae63a82116ea698557 into 319ccaa728754e439960eefe0febc211(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:31,324 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:31,324 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/C, priority=12, startTime=1732116090845; duration=0sec 2024-11-20T15:21:31,325 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:31,325 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:C 2024-11-20T15:21:31,348 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/e39014f940ea4fd19f43300135105112 2024-11-20T15:21:31,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/9920ff5b8aad4da58f8a747a8c169740 is 50, key is test_row_0/B:col10/1732116089985/Put/seqid=0 2024-11-20T15:21:31,361 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741948_1124 (size=12301) 2024-11-20T15:21:31,362 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/9920ff5b8aad4da58f8a747a8c169740 2024-11-20T15:21:31,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/11b8b40e400c4d44b8a4f2edc9f6633a is 50, key is test_row_0/C:col10/1732116089985/Put/seqid=0 2024-11-20T15:21:31,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741949_1125 (size=12301) 2024-11-20T15:21:31,382 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/11b8b40e400c4d44b8a4f2edc9f6633a 2024-11-20T15:21:31,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/e39014f940ea4fd19f43300135105112 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e39014f940ea4fd19f43300135105112 2024-11-20T15:21:31,396 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e39014f940ea4fd19f43300135105112, entries=150, sequenceid=529, filesize=12.0 K 2024-11-20T15:21:31,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/9920ff5b8aad4da58f8a747a8c169740 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/9920ff5b8aad4da58f8a747a8c169740 2024-11-20T15:21:31,404 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/9920ff5b8aad4da58f8a747a8c169740, entries=150, sequenceid=529, filesize=12.0 K 2024-11-20T15:21:31,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/11b8b40e400c4d44b8a4f2edc9f6633a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/11b8b40e400c4d44b8a4f2edc9f6633a 2024-11-20T15:21:31,410 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/11b8b40e400c4d44b8a4f2edc9f6633a, entries=150, sequenceid=529, filesize=12.0 K 2024-11-20T15:21:31,412 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d2e9d0b7937c41ae63a82116ea698557 in 477ms, sequenceid=529, compaction requested=false 2024-11-20T15:21:31,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:31,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:31,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-11-20T15:21:31,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-11-20T15:21:31,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-20T15:21:31,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5520 sec 2024-11-20T15:21:31,417 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 1.5570 sec 2024-11-20T15:21:31,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:31,431 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T15:21:31,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:31,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:31,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:31,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:31,432 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:31,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:31,438 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/6a2f6b7c2b44459c8a95759b0e2568fa is 50, key is test_row_1/A:col10/1732116091122/Put/seqid=0 2024-11-20T15:21:31,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741950_1126 (size=9857) 2024-11-20T15:21:31,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=545 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/6a2f6b7c2b44459c8a95759b0e2568fa 2024-11-20T15:21:31,457 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/6d270516b48f44ebbec920813401c027 is 50, key is test_row_1/B:col10/1732116091122/Put/seqid=0 2024-11-20T15:21:31,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741951_1127 (size=9857) 2024-11-20T15:21:31,471 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=545 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/6d270516b48f44ebbec920813401c027 2024-11-20T15:21:31,480 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/00db410f756d4242ad0f81dfe39c03d2 is 50, key is test_row_1/C:col10/1732116091122/Put/seqid=0 2024-11-20T15:21:31,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741952_1128 (size=9857) 2024-11-20T15:21:31,492 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=545 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/00db410f756d4242ad0f81dfe39c03d2 2024-11-20T15:21:31,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/6a2f6b7c2b44459c8a95759b0e2568fa as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6a2f6b7c2b44459c8a95759b0e2568fa 2024-11-20T15:21:31,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:31,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116151497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:31,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:31,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116151498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:31,505 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6a2f6b7c2b44459c8a95759b0e2568fa, entries=100, sequenceid=545, filesize=9.6 K 2024-11-20T15:21:31,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/6d270516b48f44ebbec920813401c027 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/6d270516b48f44ebbec920813401c027 2024-11-20T15:21:31,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/6d270516b48f44ebbec920813401c027, entries=100, sequenceid=545, filesize=9.6 K 2024-11-20T15:21:31,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/00db410f756d4242ad0f81dfe39c03d2 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/00db410f756d4242ad0f81dfe39c03d2 2024-11-20T15:21:31,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/00db410f756d4242ad0f81dfe39c03d2, entries=100, sequenceid=545, filesize=9.6 K 2024-11-20T15:21:31,523 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for d2e9d0b7937c41ae63a82116ea698557 in 92ms, sequenceid=545, compaction requested=true 2024-11-20T15:21:31,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:31,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:31,524 DEBUG 
[RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:31,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:31,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:31,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:31,525 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:31,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:31,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:31,526 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35651 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:31,526 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/A is initiating minor compaction (all files) 2024-11-20T15:21:31,526 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/A in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:31,526 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/8c10126e56aa49c2bc527e7485d67eb7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e39014f940ea4fd19f43300135105112, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6a2f6b7c2b44459c8a95759b0e2568fa] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=34.8 K 2024-11-20T15:21:31,526 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c10126e56aa49c2bc527e7485d67eb7, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=504, earliestPutTs=1732116089339 2024-11-20T15:21:31,527 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting e39014f940ea4fd19f43300135105112, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1732116089985 2024-11-20T15:21:31,527 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35651 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:31,528 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/B is initiating minor compaction (all files) 2024-11-20T15:21:31,528 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/B in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
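
The ExploringCompactionPolicy entries above ("selected 3 files of size 35651 ... with 1 in ratio") pick a set of store files whose sizes are mutually "in ratio", i.e. no single file dwarfs the rest of the candidate set. The check below is a simplified, illustrative version of that idea, not HBase's code; the 1.2 ratio mirrors the common default for hbase.hstore.compaction.ratio and is an assumption here.

// Simplified sketch of the "in ratio" test behind the
// "Exploring compaction algorithm has selected N files" entries. Not HBase's
// ExploringCompactionPolicy.
import java.util.List;

public class RatioCheck {
    /** A file joins the candidate set only if it is not much larger than the others combined. */
    static boolean allFilesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // The three A-store files selected above: 13.2 K + 12.0 K + 9.6 K = 35651 bytes.
        System.out.println(allFilesInRatio(List.of(13493L, 12301L, 9857L), 1.2)); // true
    }
}
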
2024-11-20T15:21:31,528 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/2e78e5b494414afbb5c9d79d299c50eb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/9920ff5b8aad4da58f8a747a8c169740, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/6d270516b48f44ebbec920813401c027] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=34.8 K 2024-11-20T15:21:31,528 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a2f6b7c2b44459c8a95759b0e2568fa, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=545, earliestPutTs=1732116091122 2024-11-20T15:21:31,528 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e78e5b494414afbb5c9d79d299c50eb, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=504, earliestPutTs=1732116089339 2024-11-20T15:21:31,529 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 9920ff5b8aad4da58f8a747a8c169740, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1732116089985 2024-11-20T15:21:31,531 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d270516b48f44ebbec920813401c027, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=545, earliestPutTs=1732116091122 2024-11-20T15:21:31,541 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#A#compaction#114 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:31,542 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/d309bd0f7d924f00acb26c288b68bdd8 is 50, key is test_row_0/A:col10/1732116089985/Put/seqid=0 2024-11-20T15:21:31,546 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#B#compaction#115 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:31,547 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/a7724d4588f74a81a597a68e21d02950 is 50, key is test_row_0/B:col10/1732116089985/Put/seqid=0 2024-11-20T15:21:31,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741954_1130 (size=13595) 2024-11-20T15:21:31,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741953_1129 (size=13595) 2024-11-20T15:21:31,574 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/a7724d4588f74a81a597a68e21d02950 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/a7724d4588f74a81a597a68e21d02950 2024-11-20T15:21:31,580 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/B of d2e9d0b7937c41ae63a82116ea698557 into a7724d4588f74a81a597a68e21d02950(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:31,580 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:31,580 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/B, priority=13, startTime=1732116091525; duration=0sec 2024-11-20T15:21:31,580 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:31,580 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:B 2024-11-20T15:21:31,580 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:31,582 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35651 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:31,582 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/C is initiating minor compaction (all files) 2024-11-20T15:21:31,582 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/C in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:31,582 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/319ccaa728754e439960eefe0febc211, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/11b8b40e400c4d44b8a4f2edc9f6633a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/00db410f756d4242ad0f81dfe39c03d2] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=34.8 K 2024-11-20T15:21:31,583 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 319ccaa728754e439960eefe0febc211, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=504, earliestPutTs=1732116089339 2024-11-20T15:21:31,584 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 11b8b40e400c4d44b8a4f2edc9f6633a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1732116089985 2024-11-20T15:21:31,584 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 00db410f756d4242ad0f81dfe39c03d2, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=545, earliestPutTs=1732116091122 2024-11-20T15:21:31,604 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#C#compaction#116 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:31,605 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/8ef399b0b477496b9106bf68907d9c66 is 50, key is test_row_0/C:col10/1732116089985/Put/seqid=0 2024-11-20T15:21:31,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:31,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T15:21:31,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:31,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:31,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:31,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:31,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:31,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:31,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:31,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116151623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:31,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:31,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116151624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:31,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/8b534f0180da4bfe92d1871f8b734a7b is 50, key is test_row_0/A:col10/1732116091602/Put/seqid=0 2024-11-20T15:21:31,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741955_1131 (size=13595) 2024-11-20T15:21:31,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741956_1132 (size=12301) 2024-11-20T15:21:31,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:31,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116151726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:31,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:31,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116151727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:31,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:31,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116151931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:31,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:31,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116151931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:31,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T15:21:31,967 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-20T15:21:31,968 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:31,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees 2024-11-20T15:21:31,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T15:21:31,971 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:31,972 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:31,972 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:31,978 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/d309bd0f7d924f00acb26c288b68bdd8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d309bd0f7d924f00acb26c288b68bdd8 2024-11-20T15:21:31,984 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/A of d2e9d0b7937c41ae63a82116ea698557 into d309bd0f7d924f00acb26c288b68bdd8(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:21:31,984 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:31,984 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/A, priority=13, startTime=1732116091524; duration=0sec 2024-11-20T15:21:31,984 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:31,984 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:A 2024-11-20T15:21:32,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=571 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/8b534f0180da4bfe92d1871f8b734a7b 2024-11-20T15:21:32,055 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/8ef399b0b477496b9106bf68907d9c66 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/8ef399b0b477496b9106bf68907d9c66 2024-11-20T15:21:32,062 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/C of d2e9d0b7937c41ae63a82116ea698557 into 8ef399b0b477496b9106bf68907d9c66(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:21:32,063 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:32,063 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/C, priority=13, startTime=1732116091525; duration=0sec 2024-11-20T15:21:32,063 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:32,063 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:C 2024-11-20T15:21:32,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/b0eb000e9437469ab8880dd2f1ca82db is 50, key is test_row_0/B:col10/1732116091602/Put/seqid=0 2024-11-20T15:21:32,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T15:21:32,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741957_1133 (size=12301) 2024-11-20T15:21:32,124 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:32,125 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-20T15:21:32,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:32,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:32,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:32,125 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:32,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:32,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:32,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:32,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116152234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:32,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:32,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116152234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:32,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T15:21:32,278 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:32,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-20T15:21:32,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:32,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:32,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:32,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:32,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:32,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:32,431 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:32,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-20T15:21:32,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:32,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:32,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:32,432 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:32,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:32,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:32,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=571 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/b0eb000e9437469ab8880dd2f1ca82db 2024-11-20T15:21:32,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/621187f9604745068e230da699ce8d43 is 50, key is test_row_0/C:col10/1732116091602/Put/seqid=0 2024-11-20T15:21:32,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741958_1134 (size=12301) 2024-11-20T15:21:32,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T15:21:32,584 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:32,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-20T15:21:32,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:32,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:32,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:32,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:32,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:32,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:32,738 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:32,739 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-20T15:21:32,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:32,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:32,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116152738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:32,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:32,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:32,739 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:32,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:32,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:32,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:32,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116152740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:32,890 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=571 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/621187f9604745068e230da699ce8d43 2024-11-20T15:21:32,892 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:32,892 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-20T15:21:32,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:32,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:32,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:32,893 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:32,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:32,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T15:21:32,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/8b534f0180da4bfe92d1871f8b734a7b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/8b534f0180da4bfe92d1871f8b734a7b
2024-11-20T15:21:32,904 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/8b534f0180da4bfe92d1871f8b734a7b, entries=150, sequenceid=571, filesize=12.0 K
2024-11-20T15:21:32,905 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/b0eb000e9437469ab8880dd2f1ca82db as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b0eb000e9437469ab8880dd2f1ca82db
2024-11-20T15:21:32,910 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b0eb000e9437469ab8880dd2f1ca82db, entries=150, sequenceid=571, filesize=12.0 K
2024-11-20T15:21:32,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/621187f9604745068e230da699ce8d43 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/621187f9604745068e230da699ce8d43
2024-11-20T15:21:32,917 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/621187f9604745068e230da699ce8d43, entries=150, sequenceid=571, filesize=12.0 K
2024-11-20T15:21:32,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d2e9d0b7937c41ae63a82116ea698557 in 1308ms, sequenceid=571, compaction requested=false
2024-11-20T15:21:32,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557:
2024-11-20T15:21:33,045 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954
2024-11-20T15:21:33,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35
2024-11-20T15:21:33,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.
2024-11-20T15:21:33,046 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-20T15:21:33,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A
2024-11-20T15:21:33,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:21:33,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B
2024-11-20T15:21:33,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:21:33,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C
2024-11-20T15:21:33,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:21:33,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/c0d3cca0bbe54dbbaf8d64b3024675f1 is 50, key is test_row_0/A:col10/1732116091622/Put/seqid=0
2024-11-20T15:21:33,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741959_1135 (size=12301)
2024-11-20T15:21:33,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34
2024-11-20T15:21:33,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557
2024-11-20T15:21:33,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing
2024-11-20T15:21:33,458 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=584 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/c0d3cca0bbe54dbbaf8d64b3024675f1
2024-11-20T15:21:33,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/e83783df50364224a7c01913f8e4ad62 is 50, key is test_row_0/B:col10/1732116091622/Put/seqid=0
2024-11-20T15:21:33,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741960_1136 (size=12301)
2024-11-20T15:21:33,481 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=584 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/e83783df50364224a7c01913f8e4ad62
2024-11-20T15:21:33,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/72810ec77fa442a1b8392137c23f3c3a is 50, key is test_row_0/C:col10/1732116091622/Put/seqid=0
2024-11-20T15:21:33,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:33,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116153503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:33,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741961_1137 (size=12301) 2024-11-20T15:21:33,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:33,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116153606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:33,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:33,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116153744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:33,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:33,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116153745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:33,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:33,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116153810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:33,908 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=584 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/72810ec77fa442a1b8392137c23f3c3a 2024-11-20T15:21:33,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/c0d3cca0bbe54dbbaf8d64b3024675f1 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/c0d3cca0bbe54dbbaf8d64b3024675f1 2024-11-20T15:21:33,919 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/c0d3cca0bbe54dbbaf8d64b3024675f1, entries=150, sequenceid=584, filesize=12.0 K 2024-11-20T15:21:33,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/e83783df50364224a7c01913f8e4ad62 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e83783df50364224a7c01913f8e4ad62 2024-11-20T15:21:33,925 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e83783df50364224a7c01913f8e4ad62, entries=150, sequenceid=584, filesize=12.0 K 2024-11-20T15:21:33,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/72810ec77fa442a1b8392137c23f3c3a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/72810ec77fa442a1b8392137c23f3c3a 2024-11-20T15:21:33,931 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/72810ec77fa442a1b8392137c23f3c3a, entries=150, sequenceid=584, filesize=12.0 K 2024-11-20T15:21:33,932 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d2e9d0b7937c41ae63a82116ea698557 in 886ms, sequenceid=584, compaction requested=true 2024-11-20T15:21:33,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:33,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:33,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-11-20T15:21:33,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=35 2024-11-20T15:21:33,935 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-20T15:21:33,935 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9610 sec 2024-11-20T15:21:33,937 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees in 1.9670 sec 2024-11-20T15:21:34,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T15:21:34,075 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-20T15:21:34,076 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:34,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=36, table=TestAcidGuarantees 2024-11-20T15:21:34,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T15:21:34,079 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=36, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=36, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:34,080 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=36, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=36, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:34,080 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:34,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:34,115 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T15:21:34,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:34,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:34,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:34,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-20T15:21:34,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:34,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:34,121 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/24eb4ece68904109a54121cc1778ca4b is 50, key is test_row_0/A:col10/1732116093491/Put/seqid=0 2024-11-20T15:21:34,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741962_1138 (size=14741) 2024-11-20T15:21:34,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:34,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116154136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:34,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T15:21:34,231 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:34,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:34,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732116154231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:34,232 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-20T15:21:34,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:34,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:34,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:34,232 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] handler.RSProcedureHandler(58): pid=37 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:34,233 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., hostname=0b62285ead89,33387,1732116069954, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at 
org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:21:34,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=37 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:34,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=37 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:34,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:34,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116154239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:34,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:34,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45810 deadline: 1732116154264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:34,265 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8176 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., hostname=0b62285ead89,33387,1732116069954, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:21:34,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T15:21:34,385 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:34,385 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-20T15:21:34,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 
{event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:34,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:34,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:34,386 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] handler.RSProcedureHandler(58): pid=37 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:34,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=37 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:34,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=37 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:34,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:34,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116154440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:34,526 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=610 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/24eb4ece68904109a54121cc1778ca4b 2024-11-20T15:21:34,536 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/ecc19db6810e4ed49ace04c185cd07e6 is 50, key is test_row_0/B:col10/1732116093491/Put/seqid=0 2024-11-20T15:21:34,538 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:34,538 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-20T15:21:34,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:34,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:34,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:34,539 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] handler.RSProcedureHandler(58): pid=37 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:34,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=37 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:34,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=37 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:34,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741963_1139 (size=12301) 2024-11-20T15:21:34,541 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=610 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/ecc19db6810e4ed49ace04c185cd07e6 2024-11-20T15:21:34,550 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/476808cbeb5e4041a72389a5a6eaa816 is 50, key is test_row_0/C:col10/1732116093491/Put/seqid=0 2024-11-20T15:21:34,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741964_1140 (size=12301) 2024-11-20T15:21:34,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T15:21:34,691 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:34,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-20T15:21:34,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:34,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:34,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:34,692 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] handler.RSProcedureHandler(58): pid=37 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:34,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=37 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:34,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=37 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:34,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:34,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116154747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:34,844 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:34,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-20T15:21:34,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:34,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:34,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:34,845 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] handler.RSProcedureHandler(58): pid=37 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:34,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=37 java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:34,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=37 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:34,955 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=610 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/476808cbeb5e4041a72389a5a6eaa816 2024-11-20T15:21:34,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/24eb4ece68904109a54121cc1778ca4b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/24eb4ece68904109a54121cc1778ca4b 2024-11-20T15:21:34,967 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/24eb4ece68904109a54121cc1778ca4b, entries=200, sequenceid=610, filesize=14.4 K 2024-11-20T15:21:34,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/ecc19db6810e4ed49ace04c185cd07e6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/ecc19db6810e4ed49ace04c185cd07e6 2024-11-20T15:21:34,973 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/ecc19db6810e4ed49ace04c185cd07e6, entries=150, sequenceid=610, filesize=12.0 K 2024-11-20T15:21:34,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/476808cbeb5e4041a72389a5a6eaa816 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/476808cbeb5e4041a72389a5a6eaa816 2024-11-20T15:21:34,981 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/476808cbeb5e4041a72389a5a6eaa816, entries=150, sequenceid=610, filesize=12.0 K 2024-11-20T15:21:34,982 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for d2e9d0b7937c41ae63a82116ea698557 in 867ms, sequenceid=610, compaction requested=true 2024-11-20T15:21:34,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:34,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
d2e9d0b7937c41ae63a82116ea698557:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:34,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:34,983 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:21:34,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:34,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:34,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:34,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:34,983 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:21:34,985 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52938 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:21:34,985 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50498 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:21:34,985 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/B is initiating minor compaction (all files) 2024-11-20T15:21:34,985 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/A is initiating minor compaction (all files) 2024-11-20T15:21:34,985 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/B in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:34,985 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/A in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:34,985 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/a7724d4588f74a81a597a68e21d02950, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b0eb000e9437469ab8880dd2f1ca82db, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e83783df50364224a7c01913f8e4ad62, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/ecc19db6810e4ed49ace04c185cd07e6] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=49.3 K 2024-11-20T15:21:34,985 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d309bd0f7d924f00acb26c288b68bdd8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/8b534f0180da4bfe92d1871f8b734a7b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/c0d3cca0bbe54dbbaf8d64b3024675f1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/24eb4ece68904109a54121cc1778ca4b] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=51.7 K 2024-11-20T15:21:34,986 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting a7724d4588f74a81a597a68e21d02950, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=545, earliestPutTs=1732116089985 2024-11-20T15:21:34,986 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d309bd0f7d924f00acb26c288b68bdd8, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=545, earliestPutTs=1732116089985 2024-11-20T15:21:34,989 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b0eb000e9437469ab8880dd2f1ca82db, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=571, earliestPutTs=1732116091452 2024-11-20T15:21:34,989 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b534f0180da4bfe92d1871f8b734a7b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=571, earliestPutTs=1732116091452 2024-11-20T15:21:34,989 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting e83783df50364224a7c01913f8e4ad62, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=584, earliestPutTs=1732116091619 2024-11-20T15:21:34,989 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
c0d3cca0bbe54dbbaf8d64b3024675f1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=584, earliestPutTs=1732116091619 2024-11-20T15:21:34,990 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24eb4ece68904109a54121cc1778ca4b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=610, earliestPutTs=1732116093484 2024-11-20T15:21:34,990 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting ecc19db6810e4ed49ace04c185cd07e6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=610, earliestPutTs=1732116093491 2024-11-20T15:21:34,997 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:34,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-20T15:21:34,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:34,999 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T15:21:34,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:34,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:34,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:34,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:34,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:34,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:35,002 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#A#compaction#126 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:35,003 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/d76fc35298f04c91844234fae870147a is 50, key is test_row_0/A:col10/1732116093491/Put/seqid=0 2024-11-20T15:21:35,005 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#B#compaction#127 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:35,006 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/34e1c0e78c484dbb9e5f29450320b44e is 50, key is test_row_0/B:col10/1732116093491/Put/seqid=0 2024-11-20T15:21:35,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/bfbeb2cd0a934fb28e9e27118110b84d is 50, key is test_row_0/A:col10/1732116094126/Put/seqid=0 2024-11-20T15:21:35,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741966_1142 (size=13731) 2024-11-20T15:21:35,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741965_1141 (size=13731) 2024-11-20T15:21:35,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741967_1143 (size=12301) 2024-11-20T15:21:35,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T15:21:35,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:35,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:35,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:35,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116155323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:35,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:35,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116155426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:35,437 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/34e1c0e78c484dbb9e5f29450320b44e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/34e1c0e78c484dbb9e5f29450320b44e 2024-11-20T15:21:35,439 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=620 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/bfbeb2cd0a934fb28e9e27118110b84d 2024-11-20T15:21:35,441 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/d76fc35298f04c91844234fae870147a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d76fc35298f04c91844234fae870147a 2024-11-20T15:21:35,446 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/B of d2e9d0b7937c41ae63a82116ea698557 into 34e1c0e78c484dbb9e5f29450320b44e(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:21:35,446 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:35,446 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/B, priority=12, startTime=1732116094983; duration=0sec 2024-11-20T15:21:35,447 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:35,447 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:B 2024-11-20T15:21:35,447 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:21:35,450 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/A of d2e9d0b7937c41ae63a82116ea698557 into d76fc35298f04c91844234fae870147a(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:35,450 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:35,450 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/A, priority=12, startTime=1732116094983; duration=0sec 2024-11-20T15:21:35,450 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:35,450 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:A 2024-11-20T15:21:35,469 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50498 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:21:35,469 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/C is initiating minor compaction (all files) 2024-11-20T15:21:35,469 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/C in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
2024-11-20T15:21:35,470 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/8ef399b0b477496b9106bf68907d9c66, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/621187f9604745068e230da699ce8d43, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/72810ec77fa442a1b8392137c23f3c3a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/476808cbeb5e4041a72389a5a6eaa816] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=49.3 K 2024-11-20T15:21:35,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/8267eb82314c433c9a7692488cb08ce5 is 50, key is test_row_0/B:col10/1732116094126/Put/seqid=0 2024-11-20T15:21:35,470 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ef399b0b477496b9106bf68907d9c66, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=545, earliestPutTs=1732116089985 2024-11-20T15:21:35,471 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 621187f9604745068e230da699ce8d43, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=571, earliestPutTs=1732116091452 2024-11-20T15:21:35,471 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 72810ec77fa442a1b8392137c23f3c3a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=584, earliestPutTs=1732116091619 2024-11-20T15:21:35,472 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 476808cbeb5e4041a72389a5a6eaa816, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=610, earliestPutTs=1732116093491 2024-11-20T15:21:35,490 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#C#compaction#130 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:35,490 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/e751c0efb948494a9aa0e005ec63df80 is 50, key is test_row_0/C:col10/1732116093491/Put/seqid=0 2024-11-20T15:21:35,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741968_1144 (size=12301) 2024-11-20T15:21:35,494 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=620 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/8267eb82314c433c9a7692488cb08ce5 2024-11-20T15:21:35,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741969_1145 (size=13731) 2024-11-20T15:21:35,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/dc52dc34dd994158a8c8c41a5eae5bc8 is 50, key is test_row_0/C:col10/1732116094126/Put/seqid=0 2024-11-20T15:21:35,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741970_1146 (size=12301) 2024-11-20T15:21:35,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:35,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116155629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:35,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:35,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45842 deadline: 1732116155758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:35,759 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4136 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., hostname=0b62285ead89,33387,1732116069954, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:21:35,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:35,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 286 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45806 deadline: 1732116155761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:35,764 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4141 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., hostname=0b62285ead89,33387,1732116069954, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:21:35,916 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/e751c0efb948494a9aa0e005ec63df80 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/e751c0efb948494a9aa0e005ec63df80 2024-11-20T15:21:35,922 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/C of d2e9d0b7937c41ae63a82116ea698557 into e751c0efb948494a9aa0e005ec63df80(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:35,922 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:35,922 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/C, priority=12, startTime=1732116094983; duration=0sec 2024-11-20T15:21:35,922 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:35,922 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:C 2024-11-20T15:21:35,924 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=620 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/dc52dc34dd994158a8c8c41a5eae5bc8 2024-11-20T15:21:35,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/bfbeb2cd0a934fb28e9e27118110b84d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/bfbeb2cd0a934fb28e9e27118110b84d 2024-11-20T15:21:35,933 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/bfbeb2cd0a934fb28e9e27118110b84d, entries=150, sequenceid=620, filesize=12.0 K 2024-11-20T15:21:35,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/8267eb82314c433c9a7692488cb08ce5 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8267eb82314c433c9a7692488cb08ce5 
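The repeated RegionTooBusyException warnings above mean the region's memstore has grown past its blocking limit (512.0 K here), so incoming Mutate calls are rejected until a flush catches up; the RpcRetryingCallerImpl entries show the client retrying the same put (tries=6 of retries=16) from inside HTable.put. A minimal client-side sketch of that write path follows; the table name and row mirror the log, but the retry settings and the helper class name are assumptions, not the test's actual code.

// Hypothetical client-side sketch: writing to TestAcidGuarantees while the
// region's memstore is above its blocking limit.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Retry settings comparable to what the log reports (retries=16);
    // the values actually used by the test are an assumption here.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // The blocking call retries internally (RpcRetryingCallerImpl); the
        // exception only surfaces once the configured retries are exhausted.
        table.put(put);
      } catch (IOException e) {
        // Typically wraps org.apache.hadoop.hbase.RegionTooBusyException
        // ("Over memstore limit=...") if the region is still blocked.
        System.err.println("Write gave up after retries: " + e.getMessage());
      }
    }
  }
}

In the AcidGuaranteesTestTool run captured here the same retry loop runs inside HTable.put (see the RpcRetryingCallerImpl frames in the traces above), which is why the rejections appear only as DEBUG/WARN records rather than failing the writer threads outright.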
2024-11-20T15:21:35,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:35,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116155934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:35,939 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8267eb82314c433c9a7692488cb08ce5, entries=150, sequenceid=620, filesize=12.0 K 2024-11-20T15:21:35,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/dc52dc34dd994158a8c8c41a5eae5bc8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/dc52dc34dd994158a8c8c41a5eae5bc8 2024-11-20T15:21:35,945 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/dc52dc34dd994158a8c8c41a5eae5bc8, entries=150, sequenceid=620, filesize=12.0 K 2024-11-20T15:21:35,946 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for d2e9d0b7937c41ae63a82116ea698557 in 947ms, sequenceid=620, compaction 
requested=false 2024-11-20T15:21:35,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:35,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:35,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=37 2024-11-20T15:21:35,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=37 2024-11-20T15:21:35,949 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-20T15:21:35,949 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8670 sec 2024-11-20T15:21:35,950 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=36, table=TestAcidGuarantees in 1.8730 sec 2024-11-20T15:21:36,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T15:21:36,182 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-11-20T15:21:36,183 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:36,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=38, table=TestAcidGuarantees 2024-11-20T15:21:36,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-20T15:21:36,184 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=38, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=38, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:36,185 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=38, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=38, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:36,185 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:36,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-20T15:21:36,337 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:36,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=39 2024-11-20T15:21:36,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:36,338 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T15:21:36,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:36,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:36,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:36,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:36,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:36,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:36,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/59a96c8a93ce49a9916cfee3a78197ee is 50, key is test_row_0/A:col10/1732116095322/Put/seqid=0 2024-11-20T15:21:36,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741971_1147 (size=12301) 2024-11-20T15:21:36,352 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=649 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/59a96c8a93ce49a9916cfee3a78197ee 2024-11-20T15:21:36,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/347190bf1bbf41ff956cf72b9e254e8a is 50, key is test_row_0/B:col10/1732116095322/Put/seqid=0 2024-11-20T15:21:36,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741972_1148 (size=12301) 2024-11-20T15:21:36,440 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:36,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. as already flushing 2024-11-20T15:21:36,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:36,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116156456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:36,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-20T15:21:36,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:36,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116156559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:36,722 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x353bc462 to 127.0.0.1:62338 2024-11-20T15:21:36,722 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:21:36,722 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6502d571 to 127.0.0.1:62338 2024-11-20T15:21:36,722 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2a0471b9 to 127.0.0.1:62338 2024-11-20T15:21:36,722 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:21:36,722 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:21:36,723 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c3b736e to 127.0.0.1:62338 2024-11-20T15:21:36,723 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:21:36,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:36,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116156762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:36,765 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=649 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/347190bf1bbf41ff956cf72b9e254e8a 2024-11-20T15:21:36,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/f1d362d6863d494c8827b69aea84c185 is 50, key is test_row_0/C:col10/1732116095322/Put/seqid=0 2024-11-20T15:21:36,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741973_1149 (size=12301) 2024-11-20T15:21:36,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-20T15:21:37,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:37,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45812 deadline: 1732116157066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:37,177 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=649 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/f1d362d6863d494c8827b69aea84c185 2024-11-20T15:21:37,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/59a96c8a93ce49a9916cfee3a78197ee as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/59a96c8a93ce49a9916cfee3a78197ee 2024-11-20T15:21:37,186 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/59a96c8a93ce49a9916cfee3a78197ee, entries=150, sequenceid=649, filesize=12.0 K 2024-11-20T15:21:37,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/347190bf1bbf41ff956cf72b9e254e8a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/347190bf1bbf41ff956cf72b9e254e8a 2024-11-20T15:21:37,191 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/347190bf1bbf41ff956cf72b9e254e8a, entries=150, sequenceid=649, filesize=12.0 K 2024-11-20T15:21:37,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/f1d362d6863d494c8827b69aea84c185 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f1d362d6863d494c8827b69aea84c185 2024-11-20T15:21:37,195 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f1d362d6863d494c8827b69aea84c185, entries=150, sequenceid=649, filesize=12.0 K 2024-11-20T15:21:37,196 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for d2e9d0b7937c41ae63a82116ea698557 in 858ms, sequenceid=649, compaction requested=true 2024-11-20T15:21:37,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:37,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:37,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=39}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=39 2024-11-20T15:21:37,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=39 2024-11-20T15:21:37,198 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-20T15:21:37,198 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0120 sec 2024-11-20T15:21:37,199 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=38, table=TestAcidGuarantees in 1.0160 sec 2024-11-20T15:21:37,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-20T15:21:37,288 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 38 completed 2024-11-20T15:21:37,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:37,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:21:37,570 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4ee2166f to 127.0.0.1:62338 2024-11-20T15:21:37,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:37,570 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
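Procedures pid=36 through pid=39 above are master-driven table flushes: the client requests a flush, the master runs a FlushTableProcedure that fans out one FlushRegionProcedure per region, and the region server writes one HFile per column family (A, B, C) into .tmp before committing it into the store. A hedged sketch of the client call that produces the "Operation: FLUSH, Table Name: default:TestAcidGuarantees ... completed" lines is below; the surrounding class is illustrative only.

// Sketch of driving the flushes recorded above via the Admin API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flushes every region of the table; each region writes one HFile per
      // column family into .tmp and then commits it, as in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

Admin.flush blocks until the procedure completes, which matches the "Finished pid=38 ... in 1.0160 sec" accounting logged by the ProcedureExecutor.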
2024-11-20T15:21:37,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:37,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:37,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:37,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:37,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:37,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/f0ca2ac1ca2f4ad694856aa5ccd0444b is 50, key is test_row_0/A:col10/1732116096451/Put/seqid=0 2024-11-20T15:21:37,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741974_1150 (size=12301) 2024-11-20T15:21:37,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=660 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/f0ca2ac1ca2f4ad694856aa5ccd0444b 2024-11-20T15:21:37,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/851d9c8c407b4649afd7c14186e69a7f is 50, key is test_row_0/B:col10/1732116096451/Put/seqid=0 2024-11-20T15:21:37,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741975_1151 (size=12301) 2024-11-20T15:21:38,385 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
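The "Over memstore limit=512.0 K" ceiling that keeps rejecting writes is the per-region blocking limit: the memstore flush size multiplied by the block multiplier. The sketch below only illustrates how the two settings combine; the 128 KB flush size is an assumed value chosen so the product matches the 512 K figure in this log, not a setting read from the test.

// Illustrative only: relationship between the flush trigger and the blocking limit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimits {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush trigger (assumed)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // blocking factor (default)

    long blockingLimit =
        conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Writes to a region are rejected with RegionTooBusyException once its
    // memstore exceeds this value, until a flush brings it back under the limit.
    System.out.println("blocking limit bytes = " + blockingLimit);
  }
}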
2024-11-20T15:21:38,391 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=660 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/851d9c8c407b4649afd7c14186e69a7f 2024-11-20T15:21:38,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/03ba1cbda6a84716a67263ee9d02290c is 50, key is test_row_0/C:col10/1732116096451/Put/seqid=0 2024-11-20T15:21:38,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741976_1152 (size=12301) 2024-11-20T15:21:38,803 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=660 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/03ba1cbda6a84716a67263ee9d02290c 2024-11-20T15:21:38,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/f0ca2ac1ca2f4ad694856aa5ccd0444b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/f0ca2ac1ca2f4ad694856aa5ccd0444b 2024-11-20T15:21:38,812 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/f0ca2ac1ca2f4ad694856aa5ccd0444b, entries=150, sequenceid=660, filesize=12.0 K 2024-11-20T15:21:38,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/851d9c8c407b4649afd7c14186e69a7f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/851d9c8c407b4649afd7c14186e69a7f 2024-11-20T15:21:38,816 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/851d9c8c407b4649afd7c14186e69a7f, entries=150, sequenceid=660, filesize=12.0 K 2024-11-20T15:21:38,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/03ba1cbda6a84716a67263ee9d02290c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/03ba1cbda6a84716a67263ee9d02290c 2024-11-20T15:21:38,820 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/03ba1cbda6a84716a67263ee9d02290c, entries=150, sequenceid=660, filesize=12.0 K 2024-11-20T15:21:38,821 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for d2e9d0b7937c41ae63a82116ea698557 in 1251ms, sequenceid=660, compaction requested=true 2024-11-20T15:21:38,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:38,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:38,821 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:21:38,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:38,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:38,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:38,821 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:21:38,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2e9d0b7937c41ae63a82116ea698557:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:38,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:38,823 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:21:38,823 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/A is initiating minor compaction (all files) 2024-11-20T15:21:38,823 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:21:38,823 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/B is initiating minor compaction (all files) 2024-11-20T15:21:38,823 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/A in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
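At this point the flush of region d2e9d0b7937c41ae63a82116ea698557 has committed one new ~12.0 K HFile per store (A, B and C), advanced the region to sequenceid=660, and queued all three stores for compaction. As a hedged illustration only (the class name is invented and the configuration is assumed to come from the ambient hbase-site.xml, not from this captured run), an equivalent flush can be requested through the public Admin API:

```java
// Illustrative sketch, not part of the captured log: requesting the same kind of
// memstore flush through the HBase client Admin API. Only the table name is taken
// from the log; everything else is an assumption.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Each store (A, B, C) writes its memstore to a new HFile under .tmp and
            // then commits it, which is what the DefaultStoreFlusher "Flushed memstore"
            // and HStore "Added ..." entries above record.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```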
2024-11-20T15:21:38,823 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/B in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:38,823 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d76fc35298f04c91844234fae870147a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/bfbeb2cd0a934fb28e9e27118110b84d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/59a96c8a93ce49a9916cfee3a78197ee, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/f0ca2ac1ca2f4ad694856aa5ccd0444b] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=49.4 K 2024-11-20T15:21:38,823 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/34e1c0e78c484dbb9e5f29450320b44e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8267eb82314c433c9a7692488cb08ce5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/347190bf1bbf41ff956cf72b9e254e8a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/851d9c8c407b4649afd7c14186e69a7f] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=49.4 K 2024-11-20T15:21:38,823 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d76fc35298f04c91844234fae870147a, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=610, earliestPutTs=1732116093491 2024-11-20T15:21:38,823 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 34e1c0e78c484dbb9e5f29450320b44e, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=610, earliestPutTs=1732116093491 2024-11-20T15:21:38,824 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfbeb2cd0a934fb28e9e27118110b84d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=620, earliestPutTs=1732116094126 2024-11-20T15:21:38,824 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 8267eb82314c433c9a7692488cb08ce5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=620, earliestPutTs=1732116094126 2024-11-20T15:21:38,824 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59a96c8a93ce49a9916cfee3a78197ee, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=649, earliestPutTs=1732116095313 2024-11-20T15:21:38,824 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 347190bf1bbf41ff956cf72b9e254e8a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=649, earliestPutTs=1732116095313 2024-11-20T15:21:38,824 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0ca2ac1ca2f4ad694856aa5ccd0444b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=660, earliestPutTs=1732116096451 2024-11-20T15:21:38,824 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 851d9c8c407b4649afd7c14186e69a7f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=660, earliestPutTs=1732116096451 2024-11-20T15:21:38,834 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#A#compaction#138 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:38,835 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/0bea31c7e4e44dc9a68b0cde719645a8 is 50, key is test_row_0/A:col10/1732116096451/Put/seqid=0 2024-11-20T15:21:38,837 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#B#compaction#139 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:38,838 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/684b04e30eb94b7caf10a73a35011e8d is 50, key is test_row_0/B:col10/1732116096451/Put/seqid=0 2024-11-20T15:21:38,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741977_1153 (size=13867) 2024-11-20T15:21:38,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741978_1154 (size=13867) 2024-11-20T15:21:39,245 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/0bea31c7e4e44dc9a68b0cde719645a8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/0bea31c7e4e44dc9a68b0cde719645a8 2024-11-20T15:21:39,246 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/684b04e30eb94b7caf10a73a35011e8d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/684b04e30eb94b7caf10a73a35011e8d 2024-11-20T15:21:39,250 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/A of d2e9d0b7937c41ae63a82116ea698557 into 0bea31c7e4e44dc9a68b0cde719645a8(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:39,250 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:39,250 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/B of d2e9d0b7937c41ae63a82116ea698557 into 684b04e30eb94b7caf10a73a35011e8d(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:21:39,250 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/A, priority=12, startTime=1732116098821; duration=0sec 2024-11-20T15:21:39,250 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:39,251 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/B, priority=12, startTime=1732116098821; duration=0sec 2024-11-20T15:21:39,251 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:39,251 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:A 2024-11-20T15:21:39,251 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:39,251 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:21:39,251 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:B 2024-11-20T15:21:39,252 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:21:39,252 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): d2e9d0b7937c41ae63a82116ea698557/C is initiating minor compaction (all files) 2024-11-20T15:21:39,252 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2e9d0b7937c41ae63a82116ea698557/C in TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
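Stores A and B have each just been rewritten from four HFiles into a single 13.5 K file, and the same ExploringCompactionPolicy selection is now being applied to store C. As a hedged sketch (the class name is invented; the file selection and the 50.00 MB/second throughput limiting still happen on the region server exactly as logged above), a client can ask for a store-level compaction like this:

```java
// Illustrative sketch, not part of the captured log: queueing a compaction of one
// column family from a client. The region server's compaction policy still chooses
// the input files and the PressureAwareThroughputController still applies the limit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactStoreC {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask for a (minor) compaction of column family C of the test table;
            // the request is queued and executed asynchronously on the server.
            admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("C"));
        }
    }
}
```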
2024-11-20T15:21:39,252 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/e751c0efb948494a9aa0e005ec63df80, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/dc52dc34dd994158a8c8c41a5eae5bc8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f1d362d6863d494c8827b69aea84c185, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/03ba1cbda6a84716a67263ee9d02290c] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp, totalSize=49.4 K 2024-11-20T15:21:39,253 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting e751c0efb948494a9aa0e005ec63df80, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=610, earliestPutTs=1732116093491 2024-11-20T15:21:39,253 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc52dc34dd994158a8c8c41a5eae5bc8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=620, earliestPutTs=1732116094126 2024-11-20T15:21:39,254 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1d362d6863d494c8827b69aea84c185, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=649, earliestPutTs=1732116095313 2024-11-20T15:21:39,254 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03ba1cbda6a84716a67263ee9d02290c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=660, earliestPutTs=1732116096451 2024-11-20T15:21:39,264 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2e9d0b7937c41ae63a82116ea698557#C#compaction#140 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:39,265 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/e101549af7124aedbe1220bd729e5793 is 50, key is test_row_0/C:col10/1732116096451/Put/seqid=0 2024-11-20T15:21:39,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741979_1155 (size=13867) 2024-11-20T15:21:39,675 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/e101549af7124aedbe1220bd729e5793 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/e101549af7124aedbe1220bd729e5793 2024-11-20T15:21:39,680 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2e9d0b7937c41ae63a82116ea698557/C of d2e9d0b7937c41ae63a82116ea698557 into e101549af7124aedbe1220bd729e5793(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:39,680 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:39,680 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557., storeName=d2e9d0b7937c41ae63a82116ea698557/C, priority=12, startTime=1732116098821; duration=0sec 2024-11-20T15:21:39,680 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:39,680 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2e9d0b7937c41ae63a82116ea698557:C 2024-11-20T15:21:39,799 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6c63ae4e to 127.0.0.1:62338 2024-11-20T15:21:39,799 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:21:39,803 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b5f27aa to 127.0.0.1:62338 2024-11-20T15:21:39,803 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:21:44,251 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62c43377 to 127.0.0.1:62338 2024-11-20T15:21:44,251 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:21:44,343 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a8f4734 to 127.0.0.1:62338 2024-11-20T15:21:44,343 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:21:44,343 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T15:21:44,343 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 133 2024-11-20T15:21:44,343 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-11-20T15:21:44,343 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 117 2024-11-20T15:21:44,343 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-11-20T15:21:44,343 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 140 2024-11-20T15:21:44,343 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T15:21:44,343 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6780 2024-11-20T15:21:44,343 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6602 2024-11-20T15:21:44,343 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T15:21:44,343 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2936 2024-11-20T15:21:44,343 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8807 rows 2024-11-20T15:21:44,343 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2902 2024-11-20T15:21:44,343 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8704 rows 2024-11-20T15:21:44,344 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T15:21:44,344 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e98ea32 to 127.0.0.1:62338 2024-11-20T15:21:44,344 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:21:44,347 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T15:21:44,364 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T15:21:44,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T15:21:44,373 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116104373"}]},"ts":"1732116104373"} 2024-11-20T15:21:44,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-20T15:21:44,374 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T15:21:44,377 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T15:21:44,378 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T15:21:44,382 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2e9d0b7937c41ae63a82116ea698557, UNASSIGN}] 2024-11-20T15:21:44,383 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=d2e9d0b7937c41ae63a82116ea698557, UNASSIGN 2024-11-20T15:21:44,383 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=d2e9d0b7937c41ae63a82116ea698557, regionState=CLOSING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:21:44,384 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T15:21:44,385 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE; CloseRegionProcedure d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:21:44,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-20T15:21:44,539 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:44,541 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(124): Close d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:44,541 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T15:21:44,541 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1681): Closing d2e9d0b7937c41ae63a82116ea698557, disabling compactions & flushes 2024-11-20T15:21:44,542 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:44,542 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:44,542 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. after waiting 0 ms 2024-11-20T15:21:44,542 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 
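These entries are the server side of the disable request logged earlier: HBaseAdmin started the disable, the master stored DisableTableProcedure pid=40 with its CloseTableRegions, TransitRegionState and CloseRegion child procedures, and the region server is now closing d2e9d0b7937c41ae63a82116ea698557. The sketch below is an assumption about the client call that starts this chain (the class name is made up); disableTable() blocks while the client polls the master, which is what the repeated "Checking to see if procedure is done pid=40" lines show.

```java
// Illustrative sketch, not part of the captured log: the synchronous client call
// presumed to trigger the DisableTableProcedure chain recorded above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTestTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            if (!admin.isTableDisabled(table)) {
                // The master stores a DisableTableProcedure and the client polls
                // until the procedure (and its region-close children) completes.
                admin.disableTable(table);
            }
        }
    }
}
```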
2024-11-20T15:21:44,542 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(2837): Flushing d2e9d0b7937c41ae63a82116ea698557 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T15:21:44,542 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=A 2024-11-20T15:21:44,542 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:44,542 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=B 2024-11-20T15:21:44,542 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:44,542 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2e9d0b7937c41ae63a82116ea698557, store=C 2024-11-20T15:21:44,542 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:44,547 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/8d2768b280524292a0292378d12a6335 is 50, key is test_row_0/A:col10/1732116099802/Put/seqid=0 2024-11-20T15:21:44,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741980_1156 (size=9857) 2024-11-20T15:21:44,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-20T15:21:44,951 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=670 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/8d2768b280524292a0292378d12a6335 2024-11-20T15:21:44,959 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/25b790ac480e4a69921c3ec911df6365 is 50, key is test_row_0/B:col10/1732116099802/Put/seqid=0 2024-11-20T15:21:44,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741981_1157 (size=9857) 2024-11-20T15:21:44,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-20T15:21:45,363 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 
{event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=670 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/25b790ac480e4a69921c3ec911df6365 2024-11-20T15:21:45,370 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/846a88d5f45a405498bd6f460a8a34de is 50, key is test_row_0/C:col10/1732116099802/Put/seqid=0 2024-11-20T15:21:45,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741982_1158 (size=9857) 2024-11-20T15:21:45,418 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T15:21:45,420 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33678, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T15:21:45,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-20T15:21:45,775 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=670 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/846a88d5f45a405498bd6f460a8a34de 2024-11-20T15:21:45,780 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/A/8d2768b280524292a0292378d12a6335 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/8d2768b280524292a0292378d12a6335 2024-11-20T15:21:45,784 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/8d2768b280524292a0292378d12a6335, entries=100, sequenceid=670, filesize=9.6 K 2024-11-20T15:21:45,785 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/B/25b790ac480e4a69921c3ec911df6365 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/25b790ac480e4a69921c3ec911df6365 2024-11-20T15:21:45,789 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/25b790ac480e4a69921c3ec911df6365, entries=100, sequenceid=670, filesize=9.6 K 2024-11-20T15:21:45,790 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/.tmp/C/846a88d5f45a405498bd6f460a8a34de as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/846a88d5f45a405498bd6f460a8a34de 2024-11-20T15:21:45,793 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/846a88d5f45a405498bd6f460a8a34de, entries=100, sequenceid=670, filesize=9.6 K 2024-11-20T15:21:45,794 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for d2e9d0b7937c41ae63a82116ea698557 in 1252ms, sequenceid=670, compaction requested=false 2024-11-20T15:21:45,795 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e46ba40fb1c742e98af60d8b247e81b4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6a32833fe7804d049dd48aef49452324, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/931a3c4da70e4cde9b5d6184622d0145, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/734bf2e426c34e269af3352afd063fa1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/1c5b0266c5bf4a5b8df7cc366c63b1e2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/5176bd14faaf4bd2aeb11d12b7eb1699, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/a2365916160a413784df542a5b4bbac4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d2970cbed588406fb3d6891a1a2e5ccd, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/031556377f1f411b9cb3ed01b88cb304, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/3811f16a80634f0c8b80cac89cf2a851, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/631c3a945bc649089ecb47e740cf19f2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/f36b6f47959445089025bc7d3eeaee14, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e086e39802c7472dba7ae2637ca24071, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/2d16e88be4b740c99ae04f8643eae81b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6e085904f79f492eb0baa32ac5b45824, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/832b4d46ba5e43069beab8867d5a76cb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/9ee08841ae6a4aa587b9ff7bb1b02e7f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/abf38656a12d4724a72effd5c7aa553c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/faeb1a637f2f477c994bf8918709cab3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/79990afdd85c42bfa21cbb383d79e281, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/aba26917ddb240e6b23843ee2a1af626, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/62eace6fb58c47379608e773cd613f67, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/887cfd89e7094ea0a171ee44b0b05d02, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/dfa8e41a61da45b19d17777b70b430f5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/b1870f4fd2f243a08b6842674b20cce6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/5d08da8d6c694d15ab237707aa3804ff, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6868d659bbaf407189ed6eb3275d2316, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d204d0853a844235b922639b40e8f87f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/79e2674b5b7043f48e3219b64aa0b9e3, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/640b7a91cfee4f89af8fe226c824ee9f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/db0e03ca9c7449698135b21722c7f585, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/91b74f6ec1db420bb18544435b81c1d3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/0dea17b97894488bb70f61efcba42f50, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e5419f1f87e94c508e83b226729bd5a0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/8c10126e56aa49c2bc527e7485d67eb7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/4bf478b88efd4d41a1db36adc2474757, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e39014f940ea4fd19f43300135105112, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d309bd0f7d924f00acb26c288b68bdd8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6a2f6b7c2b44459c8a95759b0e2568fa, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/8b534f0180da4bfe92d1871f8b734a7b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/c0d3cca0bbe54dbbaf8d64b3024675f1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/24eb4ece68904109a54121cc1778ca4b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d76fc35298f04c91844234fae870147a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/bfbeb2cd0a934fb28e9e27118110b84d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/59a96c8a93ce49a9916cfee3a78197ee, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/f0ca2ac1ca2f4ad694856aa5ccd0444b] to archive 2024-11-20T15:21:45,798 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T15:21:45,803 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e46ba40fb1c742e98af60d8b247e81b4 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e46ba40fb1c742e98af60d8b247e81b4 2024-11-20T15:21:45,805 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6a32833fe7804d049dd48aef49452324 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6a32833fe7804d049dd48aef49452324 2024-11-20T15:21:45,806 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/931a3c4da70e4cde9b5d6184622d0145 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/931a3c4da70e4cde9b5d6184622d0145 2024-11-20T15:21:45,807 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/734bf2e426c34e269af3352afd063fa1 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/734bf2e426c34e269af3352afd063fa1 2024-11-20T15:21:45,808 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/1c5b0266c5bf4a5b8df7cc366c63b1e2 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/1c5b0266c5bf4a5b8df7cc366c63b1e2 2024-11-20T15:21:45,810 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/5176bd14faaf4bd2aeb11d12b7eb1699 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/5176bd14faaf4bd2aeb11d12b7eb1699 2024-11-20T15:21:45,811 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/a2365916160a413784df542a5b4bbac4 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/a2365916160a413784df542a5b4bbac4 2024-11-20T15:21:45,812 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d2970cbed588406fb3d6891a1a2e5ccd to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d2970cbed588406fb3d6891a1a2e5ccd 2024-11-20T15:21:45,813 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/031556377f1f411b9cb3ed01b88cb304 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/031556377f1f411b9cb3ed01b88cb304 2024-11-20T15:21:45,815 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/3811f16a80634f0c8b80cac89cf2a851 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/3811f16a80634f0c8b80cac89cf2a851 2024-11-20T15:21:45,816 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/631c3a945bc649089ecb47e740cf19f2 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/631c3a945bc649089ecb47e740cf19f2 2024-11-20T15:21:45,817 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/f36b6f47959445089025bc7d3eeaee14 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/f36b6f47959445089025bc7d3eeaee14 2024-11-20T15:21:45,819 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e086e39802c7472dba7ae2637ca24071 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e086e39802c7472dba7ae2637ca24071 2024-11-20T15:21:45,820 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/2d16e88be4b740c99ae04f8643eae81b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/2d16e88be4b740c99ae04f8643eae81b 2024-11-20T15:21:45,821 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6e085904f79f492eb0baa32ac5b45824 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6e085904f79f492eb0baa32ac5b45824 2024-11-20T15:21:45,823 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/832b4d46ba5e43069beab8867d5a76cb to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/832b4d46ba5e43069beab8867d5a76cb 2024-11-20T15:21:45,824 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/9ee08841ae6a4aa587b9ff7bb1b02e7f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/9ee08841ae6a4aa587b9ff7bb1b02e7f 2024-11-20T15:21:45,826 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/abf38656a12d4724a72effd5c7aa553c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/abf38656a12d4724a72effd5c7aa553c 2024-11-20T15:21:45,827 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/faeb1a637f2f477c994bf8918709cab3 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/faeb1a637f2f477c994bf8918709cab3 2024-11-20T15:21:45,829 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/79990afdd85c42bfa21cbb383d79e281 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/79990afdd85c42bfa21cbb383d79e281 2024-11-20T15:21:45,830 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/aba26917ddb240e6b23843ee2a1af626 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/aba26917ddb240e6b23843ee2a1af626 2024-11-20T15:21:45,831 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/62eace6fb58c47379608e773cd613f67 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/62eace6fb58c47379608e773cd613f67 2024-11-20T15:21:45,833 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/887cfd89e7094ea0a171ee44b0b05d02 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/887cfd89e7094ea0a171ee44b0b05d02 2024-11-20T15:21:45,834 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/dfa8e41a61da45b19d17777b70b430f5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/dfa8e41a61da45b19d17777b70b430f5 2024-11-20T15:21:45,835 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/b1870f4fd2f243a08b6842674b20cce6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/b1870f4fd2f243a08b6842674b20cce6 2024-11-20T15:21:45,837 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/5d08da8d6c694d15ab237707aa3804ff to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/5d08da8d6c694d15ab237707aa3804ff 2024-11-20T15:21:45,838 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6868d659bbaf407189ed6eb3275d2316 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6868d659bbaf407189ed6eb3275d2316 2024-11-20T15:21:45,839 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d204d0853a844235b922639b40e8f87f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d204d0853a844235b922639b40e8f87f 2024-11-20T15:21:45,841 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/79e2674b5b7043f48e3219b64aa0b9e3 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/79e2674b5b7043f48e3219b64aa0b9e3 2024-11-20T15:21:45,842 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/640b7a91cfee4f89af8fe226c824ee9f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/640b7a91cfee4f89af8fe226c824ee9f 2024-11-20T15:21:45,843 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/db0e03ca9c7449698135b21722c7f585 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/db0e03ca9c7449698135b21722c7f585 2024-11-20T15:21:45,845 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/91b74f6ec1db420bb18544435b81c1d3 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/91b74f6ec1db420bb18544435b81c1d3 2024-11-20T15:21:45,846 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/0dea17b97894488bb70f61efcba42f50 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/0dea17b97894488bb70f61efcba42f50 2024-11-20T15:21:45,847 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e5419f1f87e94c508e83b226729bd5a0 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e5419f1f87e94c508e83b226729bd5a0 2024-11-20T15:21:45,849 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/8c10126e56aa49c2bc527e7485d67eb7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/8c10126e56aa49c2bc527e7485d67eb7 2024-11-20T15:21:45,850 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/4bf478b88efd4d41a1db36adc2474757 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/4bf478b88efd4d41a1db36adc2474757 2024-11-20T15:21:45,851 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e39014f940ea4fd19f43300135105112 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/e39014f940ea4fd19f43300135105112 2024-11-20T15:21:45,852 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d309bd0f7d924f00acb26c288b68bdd8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d309bd0f7d924f00acb26c288b68bdd8 2024-11-20T15:21:45,854 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6a2f6b7c2b44459c8a95759b0e2568fa to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/6a2f6b7c2b44459c8a95759b0e2568fa 2024-11-20T15:21:45,855 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/8b534f0180da4bfe92d1871f8b734a7b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/8b534f0180da4bfe92d1871f8b734a7b 2024-11-20T15:21:45,856 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/c0d3cca0bbe54dbbaf8d64b3024675f1 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/c0d3cca0bbe54dbbaf8d64b3024675f1 2024-11-20T15:21:45,858 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/24eb4ece68904109a54121cc1778ca4b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/24eb4ece68904109a54121cc1778ca4b 2024-11-20T15:21:45,859 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d76fc35298f04c91844234fae870147a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/d76fc35298f04c91844234fae870147a 2024-11-20T15:21:45,860 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/bfbeb2cd0a934fb28e9e27118110b84d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/bfbeb2cd0a934fb28e9e27118110b84d 2024-11-20T15:21:45,862 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/59a96c8a93ce49a9916cfee3a78197ee to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/59a96c8a93ce49a9916cfee3a78197ee 2024-11-20T15:21:45,863 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/f0ca2ac1ca2f4ad694856aa5ccd0444b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/f0ca2ac1ca2f4ad694856aa5ccd0444b 2024-11-20T15:21:45,878 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/851eab813ca742919b77141b4264a81a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/f747aeb9ddd547f2b1c10335c1a8beb0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/9c48194c21d946f191c8de3e4b750692, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/61f789a2095a41ac8f59f91140d458b3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b41877d0403841478db5990fff8b3766, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/1294b61bf867408cb9cf3c05e82ffac6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/477502956fda4ac2bdb89a2e65d85fe7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8b757b05814c4946a1239dc5d0bdcfce, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/6912c869a64841409b6e19c926fd8480, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/63563e7f3e8a41ab91926d8e43d84416, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/72ec3d935be545b89b4d7aa1371f6e39, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e67fa22739f94ce29721a0be20783d41, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/3ee7bb1b692c446db9faa89ddc068b23, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/7aadc893b1da4b3b919f96ae17b740a1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/a7e24be5eb8f4a6cb797be641fd22a80, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/3566fdb8701b4064aebd442f71a302c4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/76eebf1f26be4d00a7d8f9a0145f64a5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bb628b46a87a4df2aaf5dfe520299475, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/9a57fce5413f4594b65d08206ddac29e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8d9a61d96091471294f8b692437679cf, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/2bd33984da0c4c699e50a61b81e986d9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/1225b51efe834f6fbf7c458626a90272, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e7edb6ff62984600ac4dae524ba544c4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/40554cb0a1d24f039bc62b7108decca8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/2e85432f6d8e4f20b6a0dd28f036153d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/220199c3b0da45d68555ef52d173d76a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bdcb08f508f847dba8efdc97f1d8cdf4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bf7f44416ba946edaf9fe775d2e96474, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b27b87b1df364f34aae45237ab49e0f6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/1b68ec9d942a446d88c25a3b161b59c5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/f4482da00d3b4dd1b03c4c5cc8718552, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/200522c011454e638a8d10fe3b17899b, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/ad642adb76fa49b486eeb9a10310dbb7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/0a597d3fb3b34685a4dfa4a9c79abfdc, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/2e78e5b494414afbb5c9d79d299c50eb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/eab2325abfc144029b319c246850a14b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/9920ff5b8aad4da58f8a747a8c169740, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/a7724d4588f74a81a597a68e21d02950, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/6d270516b48f44ebbec920813401c027, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b0eb000e9437469ab8880dd2f1ca82db, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e83783df50364224a7c01913f8e4ad62, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/34e1c0e78c484dbb9e5f29450320b44e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/ecc19db6810e4ed49ace04c185cd07e6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8267eb82314c433c9a7692488cb08ce5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/347190bf1bbf41ff956cf72b9e254e8a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/851d9c8c407b4649afd7c14186e69a7f] to archive 2024-11-20T15:21:45,879 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
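The HStore(2316) and HFileArchiver entries above record the path convention used when this store closes: each compacted store file under <test-data root>/data/default/TestAcidGuarantees/<region>/<family>/ is moved to the same relative location under <test-data root>/archive/data/default/TestAcidGuarantees/<region>/<family>/. Below is a minimal, self-contained Java sketch of that data-to-archive path mapping; the class and method names are illustrative placeholders, not HBase's actual HFileArchiver API.

// Illustrative sketch only -- not the HBase HFileArchiver implementation.
// Given a store file URI under <root>/data/..., compute the mirror URI
// under <root>/archive/data/..., as seen in the log entries above.
public class ArchivePathMapper {  // hypothetical class name
    static String toArchivePath(String rootDir, String storeFilePath) {
        String dataPrefix = rootDir + "/data/";
        if (!storeFilePath.startsWith(dataPrefix)) {
            throw new IllegalArgumentException("not under " + dataPrefix);
        }
        // Keep the table/region/family/file suffix and re-root it under archive/data/.
        return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3";
        String src = root + "/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/851eab813ca742919b77141b4264a81a";
        System.out.println(toArchivePath(root, src));
    }
}

Running main prints the same archive URI that the following HFileArchiver(596) entry records for store file 851eab813ca742919b77141b4264a81a.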
2024-11-20T15:21:45,881 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/851eab813ca742919b77141b4264a81a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/851eab813ca742919b77141b4264a81a 2024-11-20T15:21:45,882 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/f747aeb9ddd547f2b1c10335c1a8beb0 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/f747aeb9ddd547f2b1c10335c1a8beb0 2024-11-20T15:21:45,883 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/9c48194c21d946f191c8de3e4b750692 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/9c48194c21d946f191c8de3e4b750692 2024-11-20T15:21:45,885 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/61f789a2095a41ac8f59f91140d458b3 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/61f789a2095a41ac8f59f91140d458b3 2024-11-20T15:21:45,886 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b41877d0403841478db5990fff8b3766 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b41877d0403841478db5990fff8b3766 2024-11-20T15:21:45,887 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/1294b61bf867408cb9cf3c05e82ffac6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/1294b61bf867408cb9cf3c05e82ffac6 2024-11-20T15:21:45,888 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/477502956fda4ac2bdb89a2e65d85fe7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/477502956fda4ac2bdb89a2e65d85fe7 2024-11-20T15:21:45,889 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8b757b05814c4946a1239dc5d0bdcfce to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8b757b05814c4946a1239dc5d0bdcfce 2024-11-20T15:21:45,890 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/6912c869a64841409b6e19c926fd8480 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/6912c869a64841409b6e19c926fd8480 2024-11-20T15:21:45,892 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/63563e7f3e8a41ab91926d8e43d84416 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/63563e7f3e8a41ab91926d8e43d84416 2024-11-20T15:21:45,893 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/72ec3d935be545b89b4d7aa1371f6e39 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/72ec3d935be545b89b4d7aa1371f6e39 2024-11-20T15:21:45,894 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e67fa22739f94ce29721a0be20783d41 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e67fa22739f94ce29721a0be20783d41 2024-11-20T15:21:45,896 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/3ee7bb1b692c446db9faa89ddc068b23 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/3ee7bb1b692c446db9faa89ddc068b23 2024-11-20T15:21:45,897 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/7aadc893b1da4b3b919f96ae17b740a1 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/7aadc893b1da4b3b919f96ae17b740a1 2024-11-20T15:21:45,898 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/a7e24be5eb8f4a6cb797be641fd22a80 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/a7e24be5eb8f4a6cb797be641fd22a80 2024-11-20T15:21:45,899 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/3566fdb8701b4064aebd442f71a302c4 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/3566fdb8701b4064aebd442f71a302c4 2024-11-20T15:21:45,901 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/76eebf1f26be4d00a7d8f9a0145f64a5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/76eebf1f26be4d00a7d8f9a0145f64a5 2024-11-20T15:21:45,902 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bb628b46a87a4df2aaf5dfe520299475 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bb628b46a87a4df2aaf5dfe520299475 2024-11-20T15:21:45,903 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/9a57fce5413f4594b65d08206ddac29e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/9a57fce5413f4594b65d08206ddac29e 2024-11-20T15:21:45,904 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8d9a61d96091471294f8b692437679cf to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8d9a61d96091471294f8b692437679cf 2024-11-20T15:21:45,906 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/2bd33984da0c4c699e50a61b81e986d9 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/2bd33984da0c4c699e50a61b81e986d9 2024-11-20T15:21:45,907 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/1225b51efe834f6fbf7c458626a90272 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/1225b51efe834f6fbf7c458626a90272 2024-11-20T15:21:45,908 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e7edb6ff62984600ac4dae524ba544c4 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e7edb6ff62984600ac4dae524ba544c4 2024-11-20T15:21:45,909 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/40554cb0a1d24f039bc62b7108decca8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/40554cb0a1d24f039bc62b7108decca8 2024-11-20T15:21:45,911 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/2e85432f6d8e4f20b6a0dd28f036153d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/2e85432f6d8e4f20b6a0dd28f036153d 2024-11-20T15:21:45,912 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/220199c3b0da45d68555ef52d173d76a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/220199c3b0da45d68555ef52d173d76a 2024-11-20T15:21:45,914 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bdcb08f508f847dba8efdc97f1d8cdf4 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bdcb08f508f847dba8efdc97f1d8cdf4 2024-11-20T15:21:45,915 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bf7f44416ba946edaf9fe775d2e96474 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/bf7f44416ba946edaf9fe775d2e96474 2024-11-20T15:21:45,916 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b27b87b1df364f34aae45237ab49e0f6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b27b87b1df364f34aae45237ab49e0f6 2024-11-20T15:21:45,917 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/1b68ec9d942a446d88c25a3b161b59c5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/1b68ec9d942a446d88c25a3b161b59c5 2024-11-20T15:21:45,918 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/f4482da00d3b4dd1b03c4c5cc8718552 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/f4482da00d3b4dd1b03c4c5cc8718552 2024-11-20T15:21:45,919 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/200522c011454e638a8d10fe3b17899b to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/200522c011454e638a8d10fe3b17899b 2024-11-20T15:21:45,920 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/ad642adb76fa49b486eeb9a10310dbb7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/ad642adb76fa49b486eeb9a10310dbb7 2024-11-20T15:21:45,921 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/0a597d3fb3b34685a4dfa4a9c79abfdc to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/0a597d3fb3b34685a4dfa4a9c79abfdc 2024-11-20T15:21:45,922 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/2e78e5b494414afbb5c9d79d299c50eb to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/2e78e5b494414afbb5c9d79d299c50eb 2024-11-20T15:21:45,923 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/eab2325abfc144029b319c246850a14b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/eab2325abfc144029b319c246850a14b 2024-11-20T15:21:45,924 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/9920ff5b8aad4da58f8a747a8c169740 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/9920ff5b8aad4da58f8a747a8c169740 2024-11-20T15:21:45,925 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/a7724d4588f74a81a597a68e21d02950 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/a7724d4588f74a81a597a68e21d02950 2024-11-20T15:21:45,926 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/6d270516b48f44ebbec920813401c027 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/6d270516b48f44ebbec920813401c027 2024-11-20T15:21:45,928 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b0eb000e9437469ab8880dd2f1ca82db to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/b0eb000e9437469ab8880dd2f1ca82db 2024-11-20T15:21:45,929 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e83783df50364224a7c01913f8e4ad62 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/e83783df50364224a7c01913f8e4ad62 2024-11-20T15:21:45,930 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/34e1c0e78c484dbb9e5f29450320b44e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/34e1c0e78c484dbb9e5f29450320b44e 2024-11-20T15:21:45,931 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/ecc19db6810e4ed49ace04c185cd07e6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/ecc19db6810e4ed49ace04c185cd07e6 2024-11-20T15:21:45,932 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8267eb82314c433c9a7692488cb08ce5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/8267eb82314c433c9a7692488cb08ce5 2024-11-20T15:21:45,933 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/347190bf1bbf41ff956cf72b9e254e8a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/347190bf1bbf41ff956cf72b9e254e8a 2024-11-20T15:21:45,935 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/851d9c8c407b4649afd7c14186e69a7f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/851d9c8c407b4649afd7c14186e69a7f 2024-11-20T15:21:45,936 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f79a2aa455294e188d2f6e1911c9fd5e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/aa99cf67c0c94b90b59c2db93a89525d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/5997f3f1083a4388ab8b9d137f3175a5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/227eff475d42493fac890e2cc6f789ca, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/96bcc62f36e54d32b94a0fee08b51cac, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0082f12332384355a0cdc169d3d3a236, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/568995664b254ecf8deb717274403d55, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/879215701c0444318d698bc57fffa516, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/1d5f8de737f24dc1944028b8b44ffd55, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/83d6e6d49c514c34afe6a5c96f9b4a23, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d18957eac61a4602bdf23dbd45035b46, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/47309bd3a02640259056800de3153bad, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0df48eb2ac0e4d2c966e5e67586c18fe, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/2786c5b037e54855afe55cb1e8c5cd65, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0feaaf6776df4284889e4a8708dd2f5a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/4f7e05fb9adf4647b060b9a1482999f6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/044c4f3b89b14a5698e3fe509c10c1cb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/ce93df6a2856427794067fd54adb4aff, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/41a5afc92b9b41749a351ec0fc1ff862, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f41e6785491b45068585b259142faa44, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/6ba83c1cea974c7da38ddcea33946470, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/c3cc71f3777e420fa537d04664780b00, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f39e956b59f64e5d89063264a91b6056, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/57aec85a79b74af39bb0502faacab1b2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d4017f8bcee447659047beb01bd28e34, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/3a244233f0c540888313e1313017c2f3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d85d4fc7b2714f548188dc3fdfe617ee, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/292551e2000144ffae4eb6a04e5b0185, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/4e23b2a12d5d4f37974261b0bd29f2c1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f38f4c3eb2764360b5ddbff670aa4458, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/17d467cdc3514ad4b3f0aa1dd4063eae, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/15306d9a83c64cacb1cf8dd7c2c70ccd, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/1027c627e14b466e8812777448f855b9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/5eb0791192f848e5afe55c4eaafe08da, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/319ccaa728754e439960eefe0febc211, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/48dd268d76c24f96879a703cccc318f2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/11b8b40e400c4d44b8a4f2edc9f6633a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/8ef399b0b477496b9106bf68907d9c66, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/00db410f756d4242ad0f81dfe39c03d2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/621187f9604745068e230da699ce8d43, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/72810ec77fa442a1b8392137c23f3c3a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/e751c0efb948494a9aa0e005ec63df80, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/476808cbeb5e4041a72389a5a6eaa816, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/dc52dc34dd994158a8c8c41a5eae5bc8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f1d362d6863d494c8827b69aea84c185, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/03ba1cbda6a84716a67263ee9d02290c] to archive 2024-11-20T15:21:45,937 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
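As with families A and B, the close of family C hands the archiver the complete list of compacted store files in one HStore(2316) call, and the files are then moved individually, producing one "Archived ... to ..." line per file. A small local-filesystem sketch of that collect-then-move loop follows, using java.nio.file in place of HDFS; the helper below is a hypothetical stand-in, not the HBase implementation.

import java.io.IOException;
import java.nio.file.*;
import java.util.List;

public class StoreFileArchiverSketch {
    // Move every listed store file into the archive directory, one rename per file,
    // mirroring the one-line-per-file pattern of the HFileArchiver(596) entries.
    static void archiveAll(Path archiveFamilyDir, List<Path> storeFiles) throws IOException {
        Files.createDirectories(archiveFamilyDir);
        for (Path src : storeFiles) {
            Path dst = archiveFamilyDir.resolve(src.getFileName());
            Files.move(src, dst, StandardCopyOption.REPLACE_EXISTING);
            System.out.println("Archived " + src + " to " + dst);
        }
    }

    public static void main(String[] args) throws IOException {
        // Tiny demonstration on temporary directories standing in for the region's C family.
        Path data = Files.createTempDirectory("data-C");
        Path archive = Files.createTempDirectory("archive-C");
        Path f1 = Files.createFile(data.resolve("f79a2aa455294e188d2f6e1911c9fd5e"));
        Path f2 = Files.createFile(data.resolve("aa99cf67c0c94b90b59c2db93a89525d"));
        archiveAll(archive, List.of(f1, f2));
    }
}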
2024-11-20T15:21:45,939 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f79a2aa455294e188d2f6e1911c9fd5e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f79a2aa455294e188d2f6e1911c9fd5e 2024-11-20T15:21:45,940 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/aa99cf67c0c94b90b59c2db93a89525d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/aa99cf67c0c94b90b59c2db93a89525d 2024-11-20T15:21:45,941 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/5997f3f1083a4388ab8b9d137f3175a5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/5997f3f1083a4388ab8b9d137f3175a5 2024-11-20T15:21:45,942 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/227eff475d42493fac890e2cc6f789ca to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/227eff475d42493fac890e2cc6f789ca 2024-11-20T15:21:45,943 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/96bcc62f36e54d32b94a0fee08b51cac to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/96bcc62f36e54d32b94a0fee08b51cac 2024-11-20T15:21:45,944 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0082f12332384355a0cdc169d3d3a236 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0082f12332384355a0cdc169d3d3a236 2024-11-20T15:21:45,945 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/568995664b254ecf8deb717274403d55 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/568995664b254ecf8deb717274403d55 2024-11-20T15:21:45,946 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/879215701c0444318d698bc57fffa516 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/879215701c0444318d698bc57fffa516 2024-11-20T15:21:45,947 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/1d5f8de737f24dc1944028b8b44ffd55 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/1d5f8de737f24dc1944028b8b44ffd55 2024-11-20T15:21:45,949 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/83d6e6d49c514c34afe6a5c96f9b4a23 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/83d6e6d49c514c34afe6a5c96f9b4a23 2024-11-20T15:21:45,950 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d18957eac61a4602bdf23dbd45035b46 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d18957eac61a4602bdf23dbd45035b46 2024-11-20T15:21:45,951 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/47309bd3a02640259056800de3153bad to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/47309bd3a02640259056800de3153bad 2024-11-20T15:21:45,952 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0df48eb2ac0e4d2c966e5e67586c18fe to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0df48eb2ac0e4d2c966e5e67586c18fe 2024-11-20T15:21:45,953 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/2786c5b037e54855afe55cb1e8c5cd65 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/2786c5b037e54855afe55cb1e8c5cd65 2024-11-20T15:21:45,954 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0feaaf6776df4284889e4a8708dd2f5a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/0feaaf6776df4284889e4a8708dd2f5a 2024-11-20T15:21:45,955 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/4f7e05fb9adf4647b060b9a1482999f6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/4f7e05fb9adf4647b060b9a1482999f6 2024-11-20T15:21:45,956 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/044c4f3b89b14a5698e3fe509c10c1cb to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/044c4f3b89b14a5698e3fe509c10c1cb 2024-11-20T15:21:45,958 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/ce93df6a2856427794067fd54adb4aff to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/ce93df6a2856427794067fd54adb4aff 2024-11-20T15:21:45,959 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/41a5afc92b9b41749a351ec0fc1ff862 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/41a5afc92b9b41749a351ec0fc1ff862 2024-11-20T15:21:45,960 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f41e6785491b45068585b259142faa44 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f41e6785491b45068585b259142faa44 2024-11-20T15:21:45,961 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/6ba83c1cea974c7da38ddcea33946470 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/6ba83c1cea974c7da38ddcea33946470 2024-11-20T15:21:45,963 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/c3cc71f3777e420fa537d04664780b00 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/c3cc71f3777e420fa537d04664780b00 2024-11-20T15:21:45,964 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f39e956b59f64e5d89063264a91b6056 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f39e956b59f64e5d89063264a91b6056 2024-11-20T15:21:45,965 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/57aec85a79b74af39bb0502faacab1b2 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/57aec85a79b74af39bb0502faacab1b2 2024-11-20T15:21:45,966 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d4017f8bcee447659047beb01bd28e34 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d4017f8bcee447659047beb01bd28e34 2024-11-20T15:21:45,967 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/3a244233f0c540888313e1313017c2f3 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/3a244233f0c540888313e1313017c2f3 2024-11-20T15:21:45,968 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d85d4fc7b2714f548188dc3fdfe617ee to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/d85d4fc7b2714f548188dc3fdfe617ee 2024-11-20T15:21:45,969 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/292551e2000144ffae4eb6a04e5b0185 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/292551e2000144ffae4eb6a04e5b0185 2024-11-20T15:21:45,970 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/4e23b2a12d5d4f37974261b0bd29f2c1 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/4e23b2a12d5d4f37974261b0bd29f2c1 2024-11-20T15:21:45,971 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f38f4c3eb2764360b5ddbff670aa4458 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f38f4c3eb2764360b5ddbff670aa4458 2024-11-20T15:21:45,972 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/17d467cdc3514ad4b3f0aa1dd4063eae to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/17d467cdc3514ad4b3f0aa1dd4063eae 2024-11-20T15:21:45,972 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/15306d9a83c64cacb1cf8dd7c2c70ccd to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/15306d9a83c64cacb1cf8dd7c2c70ccd 2024-11-20T15:21:45,973 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/1027c627e14b466e8812777448f855b9 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/1027c627e14b466e8812777448f855b9 2024-11-20T15:21:45,974 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/5eb0791192f848e5afe55c4eaafe08da to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/5eb0791192f848e5afe55c4eaafe08da 2024-11-20T15:21:45,976 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/319ccaa728754e439960eefe0febc211 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/319ccaa728754e439960eefe0febc211 2024-11-20T15:21:45,977 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/48dd268d76c24f96879a703cccc318f2 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/48dd268d76c24f96879a703cccc318f2 2024-11-20T15:21:45,978 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/11b8b40e400c4d44b8a4f2edc9f6633a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/11b8b40e400c4d44b8a4f2edc9f6633a 2024-11-20T15:21:45,979 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/8ef399b0b477496b9106bf68907d9c66 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/8ef399b0b477496b9106bf68907d9c66 2024-11-20T15:21:45,980 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/00db410f756d4242ad0f81dfe39c03d2 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/00db410f756d4242ad0f81dfe39c03d2 2024-11-20T15:21:45,981 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/621187f9604745068e230da699ce8d43 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/621187f9604745068e230da699ce8d43 2024-11-20T15:21:45,982 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/72810ec77fa442a1b8392137c23f3c3a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/72810ec77fa442a1b8392137c23f3c3a 2024-11-20T15:21:45,983 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/e751c0efb948494a9aa0e005ec63df80 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/e751c0efb948494a9aa0e005ec63df80 2024-11-20T15:21:45,984 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/476808cbeb5e4041a72389a5a6eaa816 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/476808cbeb5e4041a72389a5a6eaa816 2024-11-20T15:21:45,986 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/dc52dc34dd994158a8c8c41a5eae5bc8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/dc52dc34dd994158a8c8c41a5eae5bc8 2024-11-20T15:21:45,987 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f1d362d6863d494c8827b69aea84c185 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/f1d362d6863d494c8827b69aea84c185 2024-11-20T15:21:45,988 DEBUG [StoreCloser-TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/03ba1cbda6a84716a67263ee9d02290c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/03ba1cbda6a84716a67263ee9d02290c 2024-11-20T15:21:45,992 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/recovered.edits/673.seqid, newMaxSeqId=673, maxSeqId=1 2024-11-20T15:21:45,995 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557. 2024-11-20T15:21:45,996 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1635): Region close journal for d2e9d0b7937c41ae63a82116ea698557: 2024-11-20T15:21:45,997 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(170): Closed d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:45,998 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=d2e9d0b7937c41ae63a82116ea698557, regionState=CLOSED 2024-11-20T15:21:46,001 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-20T15:21:46,001 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; CloseRegionProcedure d2e9d0b7937c41ae63a82116ea698557, server=0b62285ead89,33387,1732116069954 in 1.6140 sec 2024-11-20T15:21:46,003 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-11-20T15:21:46,003 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2e9d0b7937c41ae63a82116ea698557, UNASSIGN in 1.6190 sec 2024-11-20T15:21:46,005 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-20T15:21:46,005 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6250 sec 2024-11-20T15:21:46,006 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116106006"}]},"ts":"1732116106006"} 2024-11-20T15:21:46,007 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T15:21:46,011 INFO 
[PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T15:21:46,012 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6440 sec 2024-11-20T15:21:46,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-20T15:21:46,478 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 40 completed 2024-11-20T15:21:46,480 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T15:21:46,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:21:46,486 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:21:46,487 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:21:46,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=44 2024-11-20T15:21:46,490 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:46,494 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/recovered.edits] 2024-11-20T15:21:46,497 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/0bea31c7e4e44dc9a68b0cde719645a8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/0bea31c7e4e44dc9a68b0cde719645a8 2024-11-20T15:21:46,498 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/8d2768b280524292a0292378d12a6335 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/A/8d2768b280524292a0292378d12a6335 2024-11-20T15:21:46,500 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/25b790ac480e4a69921c3ec911df6365 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/25b790ac480e4a69921c3ec911df6365 2024-11-20T15:21:46,502 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/684b04e30eb94b7caf10a73a35011e8d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/B/684b04e30eb94b7caf10a73a35011e8d 2024-11-20T15:21:46,504 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/846a88d5f45a405498bd6f460a8a34de to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/846a88d5f45a405498bd6f460a8a34de 2024-11-20T15:21:46,505 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/e101549af7124aedbe1220bd729e5793 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/C/e101549af7124aedbe1220bd729e5793 2024-11-20T15:21:46,508 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/recovered.edits/673.seqid to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557/recovered.edits/673.seqid 2024-11-20T15:21:46,508 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/d2e9d0b7937c41ae63a82116ea698557 2024-11-20T15:21:46,509 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T15:21:46,513 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:21:46,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-20T15:21:46,520 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T15:21:46,549 DEBUG 
[PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T15:21:46,551 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:21:46,551 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T15:21:46,551 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732116106551"}]},"ts":"9223372036854775807"} 2024-11-20T15:21:46,554 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T15:21:46,554 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d2e9d0b7937c41ae63a82116ea698557, NAME => 'TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T15:21:46,554 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T15:21:46,554 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732116106554"}]},"ts":"9223372036854775807"} 2024-11-20T15:21:46,557 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T15:21:46,559 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:21:46,560 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 78 msec 2024-11-20T15:21:46,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=44 2024-11-20T15:21:46,588 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 44 completed 2024-11-20T15:21:46,601 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=238 (was 219) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/0b62285ead89:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;0b62285ead89:33387-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_905597479_22 at /127.0.0.1:39740 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=308 (was 223) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6300 (was 6843) 2024-11-20T15:21:46,609 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=238, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=308, ProcessCount=11, AvailableMemoryMB=6299 2024-11-20T15:21:46,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
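The TableDescriptorChecker warning above fires because the test's table descriptor sets MEMSTORE_FLUSHSIZE to 131072 bytes (128 KB), far below the 128 MB default, presumably to force frequent flushes during the ACID checks. The sketch below shows how such a descriptor could be built with the public HBase 2.x client API; the class name, table name and variable names are illustrative and not taken from the test source.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SmallFlushSizeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // 131072 bytes = 128 KB, far below the 128 MB default flush size, which is
          // what makes TableDescriptorChecker log the warning seen in this run.
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("FrequentFlushTable")) // illustrative name
              .setMemStoreFlushSize(128 * 1024L)
              .setColumnFamily(
                  ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A")).build())
              .build();
          admin.createTable(td);
        }
      }
    }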
2024-11-20T15:21:46,611 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T15:21:46,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T15:21:46,613 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T15:21:46,613 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:46,613 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 45 2024-11-20T15:21:46,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T15:21:46,614 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T15:21:46,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741983_1159 (size=960) 2024-11-20T15:21:46,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T15:21:46,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T15:21:47,022 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3 2024-11-20T15:21:47,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741984_1160 (size=53) 2024-11-20T15:21:47,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T15:21:47,428 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:21:47,429 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 6e74b15d2fcbe758642cc4f5cf813621, disabling compactions & flushes 2024-11-20T15:21:47,429 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:47,429 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:47,429 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. after waiting 0 ms 2024-11-20T15:21:47,429 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:47,429 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
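The create request logged at 15:21:46,611 declares three column families A, B and C with VERSIONS => '1' and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. A minimal sketch of an equivalent call against the public Admin API follows; the class and method names are illustrative, and the test's own helper code is not shown in this log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableSketch {
      // Builds the three-family descriptor matching the create request in the log
      // and submits it; `admin` is assumed to come from Connection.getAdmin().
      static void createTestTable(Admin admin) throws java.io.IOException {
        TableDescriptorBuilder tdb = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // table-level attribute from the logged descriptor: BASIC in-memory compaction
            .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
        for (String family : new String[] {"A", "B", "C"}) {
          tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
              .build());
        }
        admin.createTable(tdb.build());
      }
    }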
2024-11-20T15:21:47,429 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:47,430 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T15:21:47,430 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732116107430"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732116107430"}]},"ts":"1732116107430"} 2024-11-20T15:21:47,431 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T15:21:47,432 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T15:21:47,432 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116107432"}]},"ts":"1732116107432"} 2024-11-20T15:21:47,433 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T15:21:47,437 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e74b15d2fcbe758642cc4f5cf813621, ASSIGN}] 2024-11-20T15:21:47,438 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e74b15d2fcbe758642cc4f5cf813621, ASSIGN 2024-11-20T15:21:47,438 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e74b15d2fcbe758642cc4f5cf813621, ASSIGN; state=OFFLINE, location=0b62285ead89,33387,1732116069954; forceNewPlan=false, retain=false 2024-11-20T15:21:47,589 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=6e74b15d2fcbe758642cc4f5cf813621, regionState=OPENING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:21:47,590 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; OpenRegionProcedure 6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:21:47,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T15:21:47,742 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:47,745 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
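The repeated MasterRpcServices(1305) "Checking to see if procedure is done pid=45" lines are the client polling the master for completion of the CreateTableProcedure; the blocking Admin.createTable call wraps the same future that createTableAsync exposes directly. A short sketch, with an illustrative timeout:

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class AsyncCreateSketch {
      // The returned Future is what drives the repeated "Checking to see if procedure
      // is done pid=..." polling on the master until CreateTableProcedure finishes.
      static void createAndWait(Admin admin, TableDescriptor td) throws Exception {
        Future<Void> done = admin.createTableAsync(td);
        done.get(60, TimeUnit.SECONDS); // illustrative timeout
      }
    }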
2024-11-20T15:21:47,746 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] regionserver.HRegion(7285): Opening region: {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} 2024-11-20T15:21:47,746 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:47,746 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:21:47,746 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] regionserver.HRegion(7327): checking encryption for 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:47,746 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] regionserver.HRegion(7330): checking classloading for 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:47,748 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:47,749 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:21:47,749 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e74b15d2fcbe758642cc4f5cf813621 columnFamilyName A 2024-11-20T15:21:47,749 DEBUG [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:47,750 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.HStore(327): Store=6e74b15d2fcbe758642cc4f5cf813621/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:21:47,750 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:47,751 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:21:47,751 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e74b15d2fcbe758642cc4f5cf813621 columnFamilyName B 2024-11-20T15:21:47,752 DEBUG [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:47,752 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.HStore(327): Store=6e74b15d2fcbe758642cc4f5cf813621/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:21:47,752 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:47,753 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:21:47,753 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e74b15d2fcbe758642cc4f5cf813621 columnFamilyName C 2024-11-20T15:21:47,753 DEBUG [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:47,754 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.HStore(327): Store=6e74b15d2fcbe758642cc4f5cf813621/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:21:47,754 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:47,755 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:47,755 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:47,756 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T15:21:47,758 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] regionserver.HRegion(1085): writing seq id for 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:47,760 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T15:21:47,760 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] regionserver.HRegion(1102): Opened 6e74b15d2fcbe758642cc4f5cf813621; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63862113, jitterRate=-0.04838036000728607}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T15:21:47,761 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] regionserver.HRegion(1001): Region open journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:47,761 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., pid=47, masterSystemTime=1732116107742 2024-11-20T15:21:47,763 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:47,763 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=47}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
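The StoreOpener lines above show every family of the new region backed by a CompactingMemStore using the BASIC in-memory compaction policy (with a logged 2.00 MB in-memory flush threshold). Besides the table-level metadata key used here, the same policy can be requested per column family; a hedged sketch with illustrative names:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionSketch {
      // Per-family alternative to the table-level
      // 'hbase.hregion.compacting.memstore.type' => 'BASIC' attribute seen in the log.
      static ColumnFamilyDescriptor basicCompactingFamily(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
            .build();
      }
    }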
2024-11-20T15:21:47,763 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=6e74b15d2fcbe758642cc4f5cf813621, regionState=OPEN, openSeqNum=2, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:21:47,766 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=47, resume processing ppid=46 2024-11-20T15:21:47,766 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; OpenRegionProcedure 6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 in 175 msec 2024-11-20T15:21:47,767 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-20T15:21:47,767 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e74b15d2fcbe758642cc4f5cf813621, ASSIGN in 329 msec 2024-11-20T15:21:47,768 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T15:21:47,768 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116107768"}]},"ts":"1732116107768"} 2024-11-20T15:21:47,769 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T15:21:47,772 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T15:21:47,773 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1610 sec 2024-11-20T15:21:48,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T15:21:48,719 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-11-20T15:21:48,721 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d29de25 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a378df6 2024-11-20T15:21:48,725 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cca453a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:48,727 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:48,728 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58932, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:48,730 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T15:21:48,731 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33692, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T15:21:48,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T15:21:48,736 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T15:21:48,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=48, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T15:21:48,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741985_1161 (size=996) 2024-11-20T15:21:49,155 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-20T15:21:49,155 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-20T15:21:49,159 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=49, ppid=48, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T15:21:49,169 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e74b15d2fcbe758642cc4f5cf813621, REOPEN/MOVE}] 2024-11-20T15:21:49,170 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=50, ppid=49, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e74b15d2fcbe758642cc4f5cf813621, REOPEN/MOVE 2024-11-20T15:21:49,171 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=50 updating hbase:meta row=6e74b15d2fcbe758642cc4f5cf813621, regionState=CLOSING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:21:49,172 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T15:21:49,172 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE; CloseRegionProcedure 6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:21:49,324 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:49,324 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=51}] handler.UnassignRegionHandler(124): Close 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:49,324 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=51}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T15:21:49,325 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=51}] regionserver.HRegion(1681): Closing 6e74b15d2fcbe758642cc4f5cf813621, disabling compactions & flushes 2024-11-20T15:21:49,325 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=51}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:49,325 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=51}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:49,325 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=51}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. after waiting 0 ms 2024-11-20T15:21:49,325 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=51}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
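The HMaster$14 modify request at 15:21:48,736 changes only family A, enabling MOB with a 4-byte threshold (IS_MOB => 'true', MOB_THRESHOLD => '4'); the resulting ModifyTableProcedure then reopens the table's region, which is what the REOPEN/MOVE and close/open transitions around this point record. A hedged sketch of an equivalent client call (class and method names are illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobSketch {
      static void enableMobOnFamilyA(Admin admin) throws java.io.IOException {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(tn);
        TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                .setMobEnabled(true)  // IS_MOB => 'true'
                .setMobThreshold(4L)  // MOB_THRESHOLD => '4' (bytes)
                .build())
            .build();
        // Triggers ModifyTableProcedure; existing regions are reopened so they pick up
        // the new descriptor (the REOPEN/MOVE procedures logged in this run).
        admin.modifyTable(modified);
      }
    }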
2024-11-20T15:21:49,329 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=51}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T15:21:49,329 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=51}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:49,330 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=51}] regionserver.HRegion(1635): Region close journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:49,330 WARN [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=51}] regionserver.HRegionServer(3786): Not adding moved region record: 6e74b15d2fcbe758642cc4f5cf813621 to self. 2024-11-20T15:21:49,331 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=51}] handler.UnassignRegionHandler(170): Closed 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:49,331 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=50 updating hbase:meta row=6e74b15d2fcbe758642cc4f5cf813621, regionState=CLOSED 2024-11-20T15:21:49,334 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=51, resume processing ppid=50 2024-11-20T15:21:49,334 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, ppid=50, state=SUCCESS; CloseRegionProcedure 6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 in 161 msec 2024-11-20T15:21:49,334 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=50, ppid=49, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e74b15d2fcbe758642cc4f5cf813621, REOPEN/MOVE; state=CLOSED, location=0b62285ead89,33387,1732116069954; forceNewPlan=false, retain=true 2024-11-20T15:21:49,485 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=50 updating hbase:meta row=6e74b15d2fcbe758642cc4f5cf813621, regionState=OPENING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:21:49,487 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=50, state=RUNNABLE; OpenRegionProcedure 6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:21:49,638 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:49,642 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=52}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
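Note the retain=true in the reassignment above: after the close, the region is deliberately reopened on the same RegionServer (0b62285ead89,33387). A small sketch of how a client could check where the region currently lives after such a reopen; the row key "test_row" and the class/method names are illustrative:

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionLocationSketch {
      static void printLocation(Connection conn) throws java.io.IOException {
        try (RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
          // reload=true bypasses the client's location cache, which may be stale
          // right after a region reopen.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("test_row"), true);
          System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
        }
      }
    }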
2024-11-20T15:21:49,642 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=52}] regionserver.HRegion(7285): Opening region: {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} 2024-11-20T15:21:49,642 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=52}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:49,642 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=52}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:21:49,642 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=52}] regionserver.HRegion(7327): checking encryption for 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:49,642 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=52}] regionserver.HRegion(7330): checking classloading for 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:49,645 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:49,645 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:21:49,650 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e74b15d2fcbe758642cc4f5cf813621 columnFamilyName A 2024-11-20T15:21:49,652 DEBUG [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:49,653 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.HStore(327): Store=6e74b15d2fcbe758642cc4f5cf813621/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:21:49,653 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:49,654 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:21:49,654 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e74b15d2fcbe758642cc4f5cf813621 columnFamilyName B 2024-11-20T15:21:49,654 DEBUG [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:49,654 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.HStore(327): Store=6e74b15d2fcbe758642cc4f5cf813621/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:21:49,654 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:49,655 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:21:49,655 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e74b15d2fcbe758642cc4f5cf813621 columnFamilyName C 2024-11-20T15:21:49,655 DEBUG [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:49,656 INFO [StoreOpener-6e74b15d2fcbe758642cc4f5cf813621-1 {}] regionserver.HStore(327): Store=6e74b15d2fcbe758642cc4f5cf813621/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:21:49,656 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=52}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:49,656 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=52}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:49,658 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=52}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:49,659 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=52}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T15:21:49,660 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=52}] regionserver.HRegion(1085): writing seq id for 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:49,661 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=52}] regionserver.HRegion(1102): Opened 6e74b15d2fcbe758642cc4f5cf813621; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64477214, jitterRate=-0.039214640855789185}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T15:21:49,662 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=52}] regionserver.HRegion(1001): Region open journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:49,663 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=52}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., pid=52, masterSystemTime=1732116109638 2024-11-20T15:21:49,665 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=52}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:49,665 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=52}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
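With the modified descriptor in effect, any value longer than 4 bytes written to family A is stored as a MOB cell on flush, which is presumably the path testMobMixedAtomicity exercises; reads are unchanged from the client's point of view. A round-trip sketch with illustrative row, qualifier and value:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobPutGetSketch {
      static void roundTrip(Connection conn) throws java.io.IOException {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          byte[] row = Bytes.toBytes("test_row_0");            // illustrative row key
          byte[] value = Bytes.toBytes("value-above-4-bytes"); // > MOB_THRESHOLD of 4
          table.put(new Put(row).addColumn(Bytes.toBytes("A"), Bytes.toBytes("col0"), value));
          // MOB storage is transparent to readers; the value comes back as usual.
          Result r = table.get(new Get(row));
          System.out.println(Bytes.toString(r.getValue(Bytes.toBytes("A"), Bytes.toBytes("col0"))));
        }
      }
    }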
2024-11-20T15:21:49,665 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=50 updating hbase:meta row=6e74b15d2fcbe758642cc4f5cf813621, regionState=OPEN, openSeqNum=5, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:21:49,668 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=50 2024-11-20T15:21:49,668 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=50, state=SUCCESS; OpenRegionProcedure 6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 in 180 msec 2024-11-20T15:21:49,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-20T15:21:49,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e74b15d2fcbe758642cc4f5cf813621, REOPEN/MOVE in 499 msec 2024-11-20T15:21:49,673 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=49, resume processing ppid=48 2024-11-20T15:21:49,673 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, ppid=48, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 513 msec 2024-11-20T15:21:49,677 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 937 msec 2024-11-20T15:21:49,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=48 2024-11-20T15:21:49,687 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2931c73e to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c7d6279 2024-11-20T15:21:49,693 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c820ef9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:49,695 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x491ea2ee to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b55744e 2024-11-20T15:21:49,705 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e3a4420, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:49,707 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x190853fc to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a9306be 2024-11-20T15:21:49,715 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24f64590, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:49,717 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x46114993 to 
127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@769942d9 2024-11-20T15:21:49,721 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a4c53ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:49,722 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2885d2d9 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@cb464a 2024-11-20T15:21:49,727 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68f0be85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:49,728 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78cafade to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@152377d4 2024-11-20T15:21:49,731 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@517ff977, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:49,732 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14c16cd4 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a52344f 2024-11-20T15:21:49,735 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3448d233, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:49,736 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0341384e to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8ba8425 2024-11-20T15:21:49,739 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a11164b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:49,740 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26b120d9 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7af61386 2024-11-20T15:21:49,743 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a7e1dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:21:49,746 DEBUG 
[hconnection-0x9899fbe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:49,747 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:49,747 DEBUG [hconnection-0x3e27bc2b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:49,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-20T15:21:49,748 DEBUG [hconnection-0x4c469bad-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:49,748 DEBUG [hconnection-0x742d2d84-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:49,749 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58948, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:49,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T15:21:49,750 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58962, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:49,750 DEBUG [hconnection-0x6e2e094d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:49,750 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58970, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:49,751 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58974, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:49,752 DEBUG [hconnection-0x13dd1438-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:49,752 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:49,753 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:49,753 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:49,753 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58978, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:49,754 DEBUG [hconnection-0x5f894b6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-20T15:21:49,755 DEBUG [hconnection-0x17b22f8e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:49,756 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58982, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:49,758 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58984, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:49,760 DEBUG [hconnection-0x6b4ab071-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:21:49,760 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58994, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:49,761 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58996, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:21:49,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:49,777 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:21:49,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:21:49,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:49,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:21:49,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:49,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:21:49,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:49,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:49,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116169826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:49,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:49,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116169828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:49,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:49,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116169829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:49,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:49,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116169830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:49,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:49,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116169828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:49,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204a62c1b37e0a479197bbb21060fb92b9_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116109776/Put/seqid=0 2024-11-20T15:21:49,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T15:21:49,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741986_1162 (size=17034) 2024-11-20T15:21:49,877 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:49,887 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204a62c1b37e0a479197bbb21060fb92b9_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204a62c1b37e0a479197bbb21060fb92b9_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:49,888 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/edbb8930bc1c4716a1c3f20c1522fb67, store: [table=TestAcidGuarantees 
family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:49,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/edbb8930bc1c4716a1c3f20c1522fb67 is 175, key is test_row_0/A:col10/1732116109776/Put/seqid=0 2024-11-20T15:21:49,905 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:49,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T15:21:49,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:49,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:21:49,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:49,907 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:49,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:49,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
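The records above show both sides of the blocked flush: the master turns the jenkins "flush TestAcidGuarantees" request into FlushTableProcedure pid=53 with a FlushRegionProcedure child pid=54, while the region server rejects concurrent Mutate RPCs with RegionTooBusyException because region 6e74b15d2fcbe758642cc4f5cf813621 is over its 512.0 K memstore blocking limit and MemStoreFlusher is still draining the 53.67 KB of pending data. For orientation only, the following is a minimal sketch of how those events map onto the public client API; the row/column names are taken from the log, but the explicit retry loop and its backoff are assumptions (the HBase client normally retries this exception internally).

```java
// Illustrative sketch only; not part of the test output.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table t = conn.getTable(table)) {
      // Equivalent of the "Client=jenkins ... flush TestAcidGuarantees" request above:
      // the master runs it as FlushTableProcedure (pid=53) and dispatches one
      // FlushRegionProcedure per region (pid=54) to the region server.
      admin.flush(table);

      // Writers racing with the flush can see RegionTooBusyException while the
      // region's memstore is over its blocking limit (512.0 K in this run).
      // The explicit loop below only makes that server-side behaviour visible.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          t.put(put);
          break;
        } catch (RegionTooBusyException busy) {
          Thread.sleep(100L * (attempt + 1)); // back off until the flush frees memstore space
        }
      }
    }
  }
}
```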
2024-11-20T15:21:49,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741987_1163 (size=48139) 2024-11-20T15:21:49,927 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/edbb8930bc1c4716a1c3f20c1522fb67 2024-11-20T15:21:49,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:49,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:49,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116169936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:49,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116169935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:49,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:49,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116169937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:49,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:49,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:49,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116169938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:49,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116169938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:49,979 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/12bccbf2bfdc43299dc6d4e7b20ae1f3 is 50, key is test_row_0/B:col10/1732116109776/Put/seqid=0 2024-11-20T15:21:49,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741988_1164 (size=12001) 2024-11-20T15:21:49,992 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/12bccbf2bfdc43299dc6d4e7b20ae1f3 2024-11-20T15:21:50,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/8e4bb6a87f874ff3b2bd626aa822d428 is 50, key is test_row_0/C:col10/1732116109776/Put/seqid=0 2024-11-20T15:21:50,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741989_1165 (size=12001) 2024-11-20T15:21:50,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T15:21:50,059 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T15:21:50,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:50,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:21:50,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:50,061 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:50,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
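Between the dispatch attempts above, every Mutate RPC keeps failing against the same 512.0 K limit while the master re-sends FlushRegionProcedure pid=54 and the region server answers "NOT flushing ... as already flushing". The blocking limit itself is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the sketch below reproduces a 512 KiB limit with assumed values (128 KiB x 4), since the test's actual configuration is not visible in this log. Once the flush completes and the memstore drops back under that limit, blocked writers normally succeed on retry.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  // Minimal sketch of where "Over memstore limit=512.0 K" comes from:
  // blocking limit = hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
  // The concrete values here (128 KiB * 4 = 512 KiB) are an assumption for illustration.
  static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // per-region flush trigger
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // puts block at 4x the flush size
    return conf;
  }
}
```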
2024-11-20T15:21:50,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:50,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:50,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116170140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:50,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:50,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116170143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116170141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:50,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116170143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:50,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116170141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,214 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T15:21:50,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:50,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:21:50,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:50,215 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:50,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:50,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:50,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T15:21:50,368 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T15:21:50,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:50,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:21:50,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:50,369 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:50,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:50,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:50,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:50,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116170446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:50,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116170447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:50,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116170447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:50,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116170448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:50,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116170449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,453 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/8e4bb6a87f874ff3b2bd626aa822d428 2024-11-20T15:21:50,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/edbb8930bc1c4716a1c3f20c1522fb67 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/edbb8930bc1c4716a1c3f20c1522fb67 2024-11-20T15:21:50,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/edbb8930bc1c4716a1c3f20c1522fb67, entries=250, sequenceid=16, filesize=47.0 K 2024-11-20T15:21:50,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/12bccbf2bfdc43299dc6d4e7b20ae1f3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/12bccbf2bfdc43299dc6d4e7b20ae1f3 2024-11-20T15:21:50,474 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/12bccbf2bfdc43299dc6d4e7b20ae1f3, entries=150, sequenceid=16, filesize=11.7 K 2024-11-20T15:21:50,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/8e4bb6a87f874ff3b2bd626aa822d428 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/8e4bb6a87f874ff3b2bd626aa822d428 2024-11-20T15:21:50,481 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/8e4bb6a87f874ff3b2bd626aa822d428, entries=150, sequenceid=16, filesize=11.7 K 2024-11-20T15:21:50,482 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6e74b15d2fcbe758642cc4f5cf813621 in 705ms, sequenceid=16, compaction requested=false 2024-11-20T15:21:50,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:50,521 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,522 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T15:21:50,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
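The repeated RegionTooBusyException entries above are the region server blocking writers while the memstore of region 6e74b15d2fcbe758642cc4f5cf813621 sits above its blocking limit; the exception is retryable and the HBase client normally absorbs it with its built-in retry policy. The sketch below is not part of the test run: it is a minimal, hedged illustration of handling the exception explicitly with the public client API. Table, row, and family names are taken from the log; the attempt count and backoff values are assumptions.

// Hedged sketch: explicit retry on RegionTooBusyException using the public HBase client API.
// Table/row/family names come from the log above; maxAttempts and backoff values are assumed.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      int maxAttempts = 5;        // assumed
      long backoffMs = 100;       // assumed initial backoff
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);         // may fail while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException busy) {
          if (attempt >= maxAttempts) {
            throw busy;           // give up after the assumed number of attempts
          }
          Thread.sleep(backoffMs); // wait for the in-flight flush to drain the memstore
          backoffMs *= 2;          // exponential backoff
        }
      }
    }
  }
}

In practice the client-side settings hbase.client.retries.number and hbase.client.pause usually cover these short blocking windows, and the exception may reach the caller wrapped in a retries-exhausted error rather than directly as shown; an explicit loop like this is only a sketch of the recovery pattern, not the test's own code.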
2024-11-20T15:21:50,522 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T15:21:50,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:21:50,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:50,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:21:50,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:50,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:21:50,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:50,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112016380897f0334fef97436a992ca3d7ce_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116109825/Put/seqid=0 2024-11-20T15:21:50,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741990_1166 (size=12154) 2024-11-20T15:21:50,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:50,568 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112016380897f0334fef97436a992ca3d7ce_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112016380897f0334fef97436a992ca3d7ce_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:50,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c07072bab03e4092b4e02a83205c14e5, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:50,570 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c07072bab03e4092b4e02a83205c14e5 is 175, key is test_row_0/A:col10/1732116109825/Put/seqid=0 2024-11-20T15:21:50,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741991_1167 (size=30955) 2024-11-20T15:21:50,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T15:21:50,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:21:50,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:50,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:50,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116170959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:50,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116170960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:50,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116170961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:50,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116170961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:50,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:50,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116170963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,003 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c07072bab03e4092b4e02a83205c14e5 2024-11-20T15:21:51,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/b4289aaa69a84340837d0b9182fec570 is 50, key is test_row_0/B:col10/1732116109825/Put/seqid=0 2024-11-20T15:21:51,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741992_1168 (size=12001) 2024-11-20T15:21:51,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116171064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116171065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116171066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,068 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116171066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116171068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116171267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116171269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,272 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116171269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116171269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116171270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,432 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/b4289aaa69a84340837d0b9182fec570 2024-11-20T15:21:51,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/cedf261d0780417ab153821adf31b048 is 50, key is test_row_0/C:col10/1732116109825/Put/seqid=0 2024-11-20T15:21:51,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741993_1169 (size=12001) 2024-11-20T15:21:51,454 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/cedf261d0780417ab153821adf31b048 2024-11-20T15:21:51,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c07072bab03e4092b4e02a83205c14e5 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c07072bab03e4092b4e02a83205c14e5 2024-11-20T15:21:51,472 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c07072bab03e4092b4e02a83205c14e5, entries=150, sequenceid=40, filesize=30.2 K 2024-11-20T15:21:51,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/b4289aaa69a84340837d0b9182fec570 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/b4289aaa69a84340837d0b9182fec570 2024-11-20T15:21:51,479 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/b4289aaa69a84340837d0b9182fec570, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T15:21:51,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/cedf261d0780417ab153821adf31b048 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/cedf261d0780417ab153821adf31b048 2024-11-20T15:21:51,491 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/cedf261d0780417ab153821adf31b048, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T15:21:51,493 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 6e74b15d2fcbe758642cc4f5cf813621 in 971ms, sequenceid=40, compaction requested=false 2024-11-20T15:21:51,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:51,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
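The flush that completes above was executed as a remote procedure (pid=54, a FlushRegionCallable dispatched for the master's flush of table TestAcidGuarantees, pid=53). The same table flush can be requested through the public Admin API; the following is a minimal, hedged sketch, assuming a reachable cluster configured by whatever hbase-site.xml is on the classpath.

// Hedged sketch: request a flush of TestAcidGuarantees via the Admin API.
// Cluster connection settings are an assumption (taken from the local configuration).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flushes every region of the table; in this build the request appears to be
      // carried out by the FlushTableProcedure / FlushRegionProcedure pair seen in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

The 512.0 K blocking limit reported in the RegionTooBusyException entries is presumably a deliberately small memstore flush size set by the test; in a normal deployment the blocking threshold is governed by hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier.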
2024-11-20T15:21:51,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-20T15:21:51,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-20T15:21:51,496 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-20T15:21:51,496 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7410 sec 2024-11-20T15:21:51,497 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.7490 sec 2024-11-20T15:21:51,527 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T15:21:51,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:51,577 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T15:21:51,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:21:51,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:51,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:21:51,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:51,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:21:51,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:51,588 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e84940f5bf6c44d28dfc883b18217be9_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116111575/Put/seqid=0 2024-11-20T15:21:51,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116171599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116171600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116171601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116171601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116171602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741994_1170 (size=14594) 2024-11-20T15:21:51,629 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:51,635 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e84940f5bf6c44d28dfc883b18217be9_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e84940f5bf6c44d28dfc883b18217be9_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:51,636 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c1a8c18689b4489695165f677e4b9d93, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:51,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c1a8c18689b4489695165f677e4b9d93 is 175, key is test_row_0/A:col10/1732116111575/Put/seqid=0 2024-11-20T15:21:51,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is 
added to blk_1073741995_1171 (size=39549) 2024-11-20T15:21:51,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116171703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116171704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,709 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116171706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116171707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116171707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T15:21:51,855 INFO [Thread-779 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-20T15:21:51,857 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:51,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-20T15:21:51,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T15:21:51,859 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:51,860 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:51,860 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:51,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116171908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116171908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116171911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116171912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:51,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116171916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:51,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T15:21:52,012 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T15:21:52,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:52,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:21:52,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:52,013 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:52,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:52,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:52,047 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c1a8c18689b4489695165f677e4b9d93 2024-11-20T15:21:52,058 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/a511fe7965914f67a576245380590f18 is 50, key is test_row_0/B:col10/1732116111575/Put/seqid=0 2024-11-20T15:21:52,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741996_1172 (size=12001) 2024-11-20T15:21:52,066 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/a511fe7965914f67a576245380590f18 2024-11-20T15:21:52,077 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/c1b68c31d3ff4883a50c7b411adebd1a is 50, key is test_row_0/C:col10/1732116111575/Put/seqid=0 2024-11-20T15:21:52,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741997_1173 (size=12001) 2024-11-20T15:21:52,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 
2024-11-20T15:21:52,166 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,167 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T15:21:52,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:52,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:21:52,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:52,168 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:52,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:21:52,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:52,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:52,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116172213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:52,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116172214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:52,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116172219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:52,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116172220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:52,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116172220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,322 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T15:21:52,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:52,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:21:52,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:52,323 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:52,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:52,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:52,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T15:21:52,476 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,477 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T15:21:52,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:52,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:21:52,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:52,477 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:52,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:52,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:52,490 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/c1b68c31d3ff4883a50c7b411adebd1a 2024-11-20T15:21:52,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c1a8c18689b4489695165f677e4b9d93 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c1a8c18689b4489695165f677e4b9d93 2024-11-20T15:21:52,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c1a8c18689b4489695165f677e4b9d93, entries=200, sequenceid=54, filesize=38.6 K 2024-11-20T15:21:52,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/a511fe7965914f67a576245380590f18 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/a511fe7965914f67a576245380590f18 2024-11-20T15:21:52,521 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/a511fe7965914f67a576245380590f18, entries=150, sequenceid=54, 
filesize=11.7 K 2024-11-20T15:21:52,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/c1b68c31d3ff4883a50c7b411adebd1a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/c1b68c31d3ff4883a50c7b411adebd1a 2024-11-20T15:21:52,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/c1b68c31d3ff4883a50c7b411adebd1a, entries=150, sequenceid=54, filesize=11.7 K 2024-11-20T15:21:52,531 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 6e74b15d2fcbe758642cc4f5cf813621 in 951ms, sequenceid=54, compaction requested=true 2024-11-20T15:21:52,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:52,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:52,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:52,531 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:52,531 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:52,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:52,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:52,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:52,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:52,533 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 118643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:52,533 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/A is initiating minor compaction (all files) 2024-11-20T15:21:52,533 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/A in 
TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:52,533 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/edbb8930bc1c4716a1c3f20c1522fb67, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c07072bab03e4092b4e02a83205c14e5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c1a8c18689b4489695165f677e4b9d93] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=115.9 K 2024-11-20T15:21:52,534 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:52,534 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/edbb8930bc1c4716a1c3f20c1522fb67, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c07072bab03e4092b4e02a83205c14e5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c1a8c18689b4489695165f677e4b9d93] 2024-11-20T15:21:52,534 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:52,534 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/B is initiating minor compaction (all files) 2024-11-20T15:21:52,534 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/B in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
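The compaction entries above show the ExploringCompactionPolicy selecting all three eligible store files per family and the short/long compaction threads starting minor compactions on stores A and B. As a minimal sketch only (assuming a reachable cluster configured through hbase-site.xml and the same table name; the class name CompactionProbe is invented for illustration), a compaction of the same kind can be requested and watched through the public Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();          // reads hbase-site.xml from the classpath
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.compact(table);                                    // ask the region servers to queue a minor compaction
      // Poll until no compaction is reported for the table; a freshly queued
      // request may briefly still report NONE before it actually starts.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);
      }
      System.out.println("Compaction finished for " + table);
    }
  }
}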
2024-11-20T15:21:52,534 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/12bccbf2bfdc43299dc6d4e7b20ae1f3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/b4289aaa69a84340837d0b9182fec570, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/a511fe7965914f67a576245380590f18] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=35.2 K 2024-11-20T15:21:52,536 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting edbb8930bc1c4716a1c3f20c1522fb67, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732116109769 2024-11-20T15:21:52,536 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 12bccbf2bfdc43299dc6d4e7b20ae1f3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732116109769 2024-11-20T15:21:52,536 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b4289aaa69a84340837d0b9182fec570, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732116109825 2024-11-20T15:21:52,537 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting c07072bab03e4092b4e02a83205c14e5, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732116109825 2024-11-20T15:21:52,537 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1a8c18689b4489695165f677e4b9d93, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732116110958 2024-11-20T15:21:52,537 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting a511fe7965914f67a576245380590f18, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732116110958 2024-11-20T15:21:52,562 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#B#compaction#153 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:52,563 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/a07bc14cba89470ab8425c4db3c4215d is 50, key is test_row_0/B:col10/1732116111575/Put/seqid=0 2024-11-20T15:21:52,573 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:52,582 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411203068cd9328bc4128a9bc661642ef8f65_6e74b15d2fcbe758642cc4f5cf813621 store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:52,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741998_1174 (size=12104) 2024-11-20T15:21:52,590 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411203068cd9328bc4128a9bc661642ef8f65_6e74b15d2fcbe758642cc4f5cf813621, store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:52,590 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203068cd9328bc4128a9bc661642ef8f65_6e74b15d2fcbe758642cc4f5cf813621 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:52,591 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/a07bc14cba89470ab8425c4db3c4215d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/a07bc14cba89470ab8425c4db3c4215d 2024-11-20T15:21:52,598 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/B of 6e74b15d2fcbe758642cc4f5cf813621 into a07bc14cba89470ab8425c4db3c4215d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
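The "average throughput ... slept 0 time(s)" lines above come from the compaction throughput controller, which keeps the aggregate write rate of running compactions under the configured ceiling (50.00 MB/second in this run) by sleeping the writer whenever it gets ahead of the limit. As a rough, generic illustration only, not the actual HBase controller class, a limiter of that shape can be written as:

// Generic rate-limiter sketch: track bytes written since the window started and
// sleep just long enough that the observed rate stays under the configured maximum.
public class ThroughputLimiter {
  private final double maxBytesPerSecond;
  private final long windowStartNanos = System.nanoTime();
  private long bytesWritten = 0;

  public ThroughputLimiter(double maxBytesPerSecond) {
    this.maxBytesPerSecond = maxBytesPerSecond;
  }

  // Call after each write of 'bytes'; blocks if the writer is running faster than the limit.
  public void control(long bytes) throws InterruptedException {
    bytesWritten += bytes;
    double elapsedSec = (System.nanoTime() - windowStartNanos) / 1e9;
    double minimumSec = bytesWritten / maxBytesPerSecond;   // earliest time this many bytes is allowed
    if (minimumSec > elapsedSec) {
      Thread.sleep((long) ((minimumSec - elapsedSec) * 1000));
    }
  }
}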
2024-11-20T15:21:52,598 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:52,599 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/B, priority=13, startTime=1732116112531; duration=0sec 2024-11-20T15:21:52,600 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:52,600 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:B 2024-11-20T15:21:52,600 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:52,601 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:52,601 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/C is initiating minor compaction (all files) 2024-11-20T15:21:52,601 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/C in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:52,601 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/8e4bb6a87f874ff3b2bd626aa822d428, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/cedf261d0780417ab153821adf31b048, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/c1b68c31d3ff4883a50c7b411adebd1a] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=35.2 K 2024-11-20T15:21:52,602 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e4bb6a87f874ff3b2bd626aa822d428, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732116109769 2024-11-20T15:21:52,602 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting cedf261d0780417ab153821adf31b048, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732116109825 2024-11-20T15:21:52,604 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting c1b68c31d3ff4883a50c7b411adebd1a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732116110958 2024-11-20T15:21:52,629 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T15:21:52,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:52,630 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T15:21:52,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:21:52,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:52,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:21:52,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:52,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:21:52,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:52,636 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#C#compaction#155 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:52,638 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/eb5ccfb7be9e495dae67c23143c9ef92 is 50, key is test_row_0/C:col10/1732116111575/Put/seqid=0 2024-11-20T15:21:52,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741999_1175 (size=4469) 2024-11-20T15:21:52,660 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#A#compaction#154 average throughput is 0.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:52,663 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/6297cf0fe6fb4e95a12e7e5f0da8a03d is 175, key is test_row_0/A:col10/1732116111575/Put/seqid=0 2024-11-20T15:21:52,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b4062c1068094508a9ea5bb8d4442c11_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116111599/Put/seqid=0 2024-11-20T15:21:52,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742000_1176 (size=12104) 2024-11-20T15:21:52,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742001_1177 (size=31058) 2024-11-20T15:21:52,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:52,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:21:52,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742002_1178 (size=12154) 2024-11-20T15:21:52,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:52,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116172732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,740 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:52,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116172728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:52,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116172734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:52,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116172735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:52,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116172736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:52,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116172837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:52,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116172843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:52,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116172843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:52,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116172843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:52,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T15:21:53,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:53,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116173041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:53,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:53,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116173046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:53,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:53,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116173049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:53,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:53,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116173050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:53,111 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/eb5ccfb7be9e495dae67c23143c9ef92 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/eb5ccfb7be9e495dae67c23143c9ef92 2024-11-20T15:21:53,119 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/6297cf0fe6fb4e95a12e7e5f0da8a03d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/6297cf0fe6fb4e95a12e7e5f0da8a03d 2024-11-20T15:21:53,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:53,126 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/C of 6e74b15d2fcbe758642cc4f5cf813621 into eb5ccfb7be9e495dae67c23143c9ef92(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
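The burst of RegionTooBusyException warnings above is the region rejecting new mutations from HRegion.checkResources while its memstore is over the blocking limit (512.0 K here, which is small because the test deliberately runs with a tiny flush size; in a normal deployment the blocking threshold is roughly hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier). A writer in the position of this test's loader threads can back off and retry. The sketch below is illustrative only: it assumes client retries are turned down so the busy condition reaches application code, and the class name BackoffWriter plus the row and value used are invented for the example.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 1);    // surface the failure instead of retrying internally
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      while (true) {
        try {
          table.put(put);
          break;                                      // write accepted
        } catch (IOException e) {
          // With retries turned down this is typically RegionTooBusyException
          // (possibly wrapped by the client retry layer): back off and try again.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5000);  // exponential backoff, capped at 5 s
        }
      }
    }
  }
}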
2024-11-20T15:21:53,126 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:53,126 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/C, priority=13, startTime=1732116112532; duration=0sec 2024-11-20T15:21:53,127 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:53,127 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:C 2024-11-20T15:21:53,131 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/A of 6e74b15d2fcbe758642cc4f5cf813621 into 6297cf0fe6fb4e95a12e7e5f0da8a03d(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:53,131 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:53,131 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/A, priority=13, startTime=1732116112531; duration=0sec 2024-11-20T15:21:53,131 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:53,131 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:A 2024-11-20T15:21:53,132 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b4062c1068094508a9ea5bb8d4442c11_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b4062c1068094508a9ea5bb8d4442c11_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:53,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c2e88296ae714b06b8e8e949681dfd1d, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:53,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c2e88296ae714b06b8e8e949681dfd1d is 175, key is test_row_0/A:col10/1732116111599/Put/seqid=0 2024-11-20T15:21:53,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742003_1179 (size=30955) 2024-11-20T15:21:53,153 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c2e88296ae714b06b8e8e949681dfd1d 2024-11-20T15:21:53,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/1c69185bf997447cb037095a9eb1ff30 is 50, key is test_row_0/B:col10/1732116111599/Put/seqid=0 2024-11-20T15:21:53,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742004_1180 (size=12001) 2024-11-20T15:21:53,173 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/1c69185bf997447cb037095a9eb1ff30 2024-11-20T15:21:53,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/100b1fb8ffa6430c812a220e04c90fba is 50, key is test_row_0/C:col10/1732116111599/Put/seqid=0 2024-11-20T15:21:53,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742005_1181 (size=12001) 2024-11-20T15:21:53,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:53,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116173344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:53,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:53,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116173350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:53,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116173355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:53,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116173355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:53,588 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/100b1fb8ffa6430c812a220e04c90fba 2024-11-20T15:21:53,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c2e88296ae714b06b8e8e949681dfd1d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c2e88296ae714b06b8e8e949681dfd1d 2024-11-20T15:21:53,606 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c2e88296ae714b06b8e8e949681dfd1d, entries=150, sequenceid=77, filesize=30.2 K 2024-11-20T15:21:53,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/1c69185bf997447cb037095a9eb1ff30 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/1c69185bf997447cb037095a9eb1ff30 2024-11-20T15:21:53,614 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/1c69185bf997447cb037095a9eb1ff30, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T15:21:53,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/100b1fb8ffa6430c812a220e04c90fba as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/100b1fb8ffa6430c812a220e04c90fba 2024-11-20T15:21:53,623 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/100b1fb8ffa6430c812a220e04c90fba, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T15:21:53,625 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 6e74b15d2fcbe758642cc4f5cf813621 in 995ms, sequenceid=77, compaction requested=false 2024-11-20T15:21:53,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:53,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:53,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-20T15:21:53,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-20T15:21:53,628 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-20T15:21:53,628 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7660 sec 2024-11-20T15:21:53,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.7720 sec 2024-11-20T15:21:53,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:53,746 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T15:21:53,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:21:53,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:53,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:21:53,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:53,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:21:53,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:53,756 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202cdcce4c7ac949afa6df8a9e72823d12_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116112735/Put/seqid=0 2024-11-20T15:21:53,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742006_1182 (size=14594) 2024-11-20T15:21:53,773 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:53,780 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202cdcce4c7ac949afa6df8a9e72823d12_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202cdcce4c7ac949afa6df8a9e72823d12_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:53,781 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/82435ee830cc4dfcb480b1b9f8bbdbf9, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:53,782 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/82435ee830cc4dfcb480b1b9f8bbdbf9 is 175, key is test_row_0/A:col10/1732116112735/Put/seqid=0 2024-11-20T15:21:53,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742007_1183 (size=39549) 2024-11-20T15:21:53,803 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/82435ee830cc4dfcb480b1b9f8bbdbf9 2024-11-20T15:21:53,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:53,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116173841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:53,846 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/f8762f1fa06643dfaf1ebf7e2e80fad0 is 50, key is test_row_0/B:col10/1732116112735/Put/seqid=0 2024-11-20T15:21:53,849 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:53,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116173849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:53,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:53,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116173854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:53,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742008_1184 (size=12001) 2024-11-20T15:21:53,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/f8762f1fa06643dfaf1ebf7e2e80fad0 2024-11-20T15:21:53,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:53,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:53,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116173863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:53,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116173862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:53,878 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/db7c5b806f6c42d4b69c33d48847a56b is 50, key is test_row_0/C:col10/1732116112735/Put/seqid=0 2024-11-20T15:21:53,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742009_1185 (size=12001) 2024-11-20T15:21:53,923 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/db7c5b806f6c42d4b69c33d48847a56b 2024-11-20T15:21:53,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/82435ee830cc4dfcb480b1b9f8bbdbf9 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/82435ee830cc4dfcb480b1b9f8bbdbf9 2024-11-20T15:21:53,937 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/82435ee830cc4dfcb480b1b9f8bbdbf9, entries=200, sequenceid=94, filesize=38.6 K 2024-11-20T15:21:53,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/f8762f1fa06643dfaf1ebf7e2e80fad0 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/f8762f1fa06643dfaf1ebf7e2e80fad0 2024-11-20T15:21:53,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:53,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116173945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:53,952 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/f8762f1fa06643dfaf1ebf7e2e80fad0, entries=150, sequenceid=94, filesize=11.7 K 2024-11-20T15:21:53,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/db7c5b806f6c42d4b69c33d48847a56b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/db7c5b806f6c42d4b69c33d48847a56b 2024-11-20T15:21:53,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T15:21:53,966 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/db7c5b806f6c42d4b69c33d48847a56b, entries=150, 
sequenceid=94, filesize=11.7 K 2024-11-20T15:21:53,966 INFO [Thread-779 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-20T15:21:53,968 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 6e74b15d2fcbe758642cc4f5cf813621 in 221ms, sequenceid=94, compaction requested=true 2024-11-20T15:21:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:53,968 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:53,968 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:53,969 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:53,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-20T15:21:53,970 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:53,971 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/B is initiating minor compaction (all files) 2024-11-20T15:21:53,971 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/B in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:21:53,971 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:53,971 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/a07bc14cba89470ab8425c4db3c4215d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/1c69185bf997447cb037095a9eb1ff30, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/f8762f1fa06643dfaf1ebf7e2e80fad0] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=35.3 K 2024-11-20T15:21:53,971 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/A is initiating minor compaction (all files) 2024-11-20T15:21:53,971 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/A in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:53,971 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/6297cf0fe6fb4e95a12e7e5f0da8a03d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c2e88296ae714b06b8e8e949681dfd1d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/82435ee830cc4dfcb480b1b9f8bbdbf9] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=99.2 K 2024-11-20T15:21:53,971 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:53,971 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:53,971 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/6297cf0fe6fb4e95a12e7e5f0da8a03d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c2e88296ae714b06b8e8e949681dfd1d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/82435ee830cc4dfcb480b1b9f8bbdbf9] 2024-11-20T15:21:53,972 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting a07bc14cba89470ab8425c4db3c4215d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732116110958 2024-11-20T15:21:53,972 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6297cf0fe6fb4e95a12e7e5f0da8a03d, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732116110958 2024-11-20T15:21:53,972 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:53,972 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:53,972 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c69185bf997447cb037095a9eb1ff30, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732116111599 2024-11-20T15:21:53,973 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2e88296ae714b06b8e8e949681dfd1d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732116111599 2024-11-20T15:21:53,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T15:21:53,973 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting f8762f1fa06643dfaf1ebf7e2e80fad0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732116112731 2024-11-20T15:21:53,973 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82435ee830cc4dfcb480b1b9f8bbdbf9, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732116112730 2024-11-20T15:21:53,994 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#B#compaction#162 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:53,995 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/6551b5b064ec4bd3946b484d642a9c47 is 50, key is test_row_0/B:col10/1732116112735/Put/seqid=0 2024-11-20T15:21:54,009 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:54,025 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112040f531d467fb41f19b32aac1da3542c1_6e74b15d2fcbe758642cc4f5cf813621 store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:54,028 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112040f531d467fb41f19b32aac1da3542c1_6e74b15d2fcbe758642cc4f5cf813621, store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:54,029 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112040f531d467fb41f19b32aac1da3542c1_6e74b15d2fcbe758642cc4f5cf813621 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:54,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742010_1186 (size=12207) 2024-11-20T15:21:54,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742011_1187 (size=4469) 2024-11-20T15:21:54,056 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#A#compaction#163 average throughput is 0.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:54,057 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/16fea317fd7d42b3a8540ec28090d00c is 175, key is test_row_0/A:col10/1732116112735/Put/seqid=0 2024-11-20T15:21:54,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T15:21:54,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742012_1188 (size=31161) 2024-11-20T15:21:54,083 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/16fea317fd7d42b3a8540ec28090d00c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/16fea317fd7d42b3a8540ec28090d00c 2024-11-20T15:21:54,090 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/A of 6e74b15d2fcbe758642cc4f5cf813621 into 16fea317fd7d42b3a8540ec28090d00c(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:54,090 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:54,090 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/A, priority=13, startTime=1732116113968; duration=0sec 2024-11-20T15:21:54,091 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:54,091 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:A 2024-11-20T15:21:54,091 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:21:54,092 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:21:54,092 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/C is initiating minor compaction (all files) 2024-11-20T15:21:54,092 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/C in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:21:54,092 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/eb5ccfb7be9e495dae67c23143c9ef92, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/100b1fb8ffa6430c812a220e04c90fba, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/db7c5b806f6c42d4b69c33d48847a56b] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=35.3 K 2024-11-20T15:21:54,093 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb5ccfb7be9e495dae67c23143c9ef92, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732116110958 2024-11-20T15:21:54,093 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 100b1fb8ffa6430c812a220e04c90fba, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732116111599 2024-11-20T15:21:54,094 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting db7c5b806f6c42d4b69c33d48847a56b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732116112731 2024-11-20T15:21:54,103 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#C#compaction#164 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:54,103 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/6c16f73ba2ba40c08ca03628bca6b784 is 50, key is test_row_0/C:col10/1732116112735/Put/seqid=0 2024-11-20T15:21:54,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742013_1189 (size=12207) 2024-11-20T15:21:54,117 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/6c16f73ba2ba40c08ca03628bca6b784 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/6c16f73ba2ba40c08ca03628bca6b784 2024-11-20T15:21:54,124 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:54,125 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/C of 6e74b15d2fcbe758642cc4f5cf813621 into 6c16f73ba2ba40c08ca03628bca6b784(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:21:54,125 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:54,125 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/C, priority=13, startTime=1732116113968; duration=0sec 2024-11-20T15:21:54,125 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:54,125 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:C 2024-11-20T15:21:54,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T15:21:54,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:54,126 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T15:21:54,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:21:54,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:54,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:21:54,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:54,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:21:54,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:54,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208c327a2ab3bc41858e22d6e7c92a6c69_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116113802/Put/seqid=0 2024-11-20T15:21:54,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
as already flushing 2024-11-20T15:21:54,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:54,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742014_1190 (size=12154) 2024-11-20T15:21:54,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:54,165 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208c327a2ab3bc41858e22d6e7c92a6c69_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208c327a2ab3bc41858e22d6e7c92a6c69_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:54,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/d9aa355e30814b80acf0df0081e69f95, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:54,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/d9aa355e30814b80acf0df0081e69f95 is 175, key is test_row_0/A:col10/1732116113802/Put/seqid=0 2024-11-20T15:21:54,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742015_1191 (size=30955) 2024-11-20T15:21:54,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:54,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116174181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:54,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T15:21:54,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:54,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116174283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:54,451 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/6551b5b064ec4bd3946b484d642a9c47 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/6551b5b064ec4bd3946b484d642a9c47 2024-11-20T15:21:54,457 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/B of 6e74b15d2fcbe758642cc4f5cf813621 into 6551b5b064ec4bd3946b484d642a9c47(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:54,457 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:54,457 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/B, priority=13, startTime=1732116113968; duration=0sec 2024-11-20T15:21:54,457 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:54,457 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:B 2024-11-20T15:21:54,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:54,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116174487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:54,575 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/d9aa355e30814b80acf0df0081e69f95 2024-11-20T15:21:54,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T15:21:54,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/dfc21d08c27245a1bd06e6e9fea34479 is 50, key is test_row_0/B:col10/1732116113802/Put/seqid=0 2024-11-20T15:21:54,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742016_1192 (size=12001) 2024-11-20T15:21:54,595 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/dfc21d08c27245a1bd06e6e9fea34479 2024-11-20T15:21:54,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/fb54bea9823543d996341075bcb0a012 is 50, key is test_row_0/C:col10/1732116113802/Put/seqid=0 2024-11-20T15:21:54,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742017_1193 (size=12001) 2024-11-20T15:21:54,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:54,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116174791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:54,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:54,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116174854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:54,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:54,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116174862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:54,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:54,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116174866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:54,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:54,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116174872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:55,010 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/fb54bea9823543d996341075bcb0a012 2024-11-20T15:21:55,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/d9aa355e30814b80acf0df0081e69f95 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/d9aa355e30814b80acf0df0081e69f95 2024-11-20T15:21:55,021 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/d9aa355e30814b80acf0df0081e69f95, entries=150, sequenceid=117, filesize=30.2 K 2024-11-20T15:21:55,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/dfc21d08c27245a1bd06e6e9fea34479 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/dfc21d08c27245a1bd06e6e9fea34479 2024-11-20T15:21:55,031 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/dfc21d08c27245a1bd06e6e9fea34479, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T15:21:55,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/fb54bea9823543d996341075bcb0a012 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/fb54bea9823543d996341075bcb0a012 2024-11-20T15:21:55,037 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/fb54bea9823543d996341075bcb0a012, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T15:21:55,038 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 6e74b15d2fcbe758642cc4f5cf813621 in 912ms, sequenceid=117, compaction requested=false 2024-11-20T15:21:55,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:55,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:55,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-20T15:21:55,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-20T15:21:55,042 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-20T15:21:55,042 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0680 sec 2024-11-20T15:21:55,044 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.0740 sec 2024-11-20T15:21:55,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T15:21:55,078 INFO [Thread-779 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-20T15:21:55,079 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:55,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-11-20T15:21:55,081 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:55,081 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T15:21:55,082 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:55,082 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:55,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T15:21:55,234 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:55,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T15:21:55,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:55,235 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T15:21:55,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:21:55,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:55,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:21:55,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:55,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:21:55,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:55,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209b4e69037e4943dba865f179140dc2d6_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116114173/Put/seqid=0 2024-11-20T15:21:55,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742018_1194 
(size=12254) 2024-11-20T15:21:55,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:55,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:21:55,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:55,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116175338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:55,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T15:21:55,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:55,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116175442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:55,593 INFO [master/0b62285ead89:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T15:21:55,593 INFO [master/0b62285ead89:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-20T15:21:55,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:55,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116175646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:55,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:55,663 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209b4e69037e4943dba865f179140dc2d6_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209b4e69037e4943dba865f179140dc2d6_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:55,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/a3d28bfc4a924d929635b656781f83f9, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:55,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/a3d28bfc4a924d929635b656781f83f9 is 175, key is test_row_0/A:col10/1732116114173/Put/seqid=0 2024-11-20T15:21:55,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742019_1195 (size=31055) 2024-11-20T15:21:55,674 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/a3d28bfc4a924d929635b656781f83f9 2024-11-20T15:21:55,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/69ad4e6a1bae458b85889f336f40d58b is 50, key is test_row_0/B:col10/1732116114173/Put/seqid=0 2024-11-20T15:21:55,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T15:21:55,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742020_1196 (size=12101) 2024-11-20T15:21:55,690 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/69ad4e6a1bae458b85889f336f40d58b 2024-11-20T15:21:55,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/167c1bbee3d2431998187e1a3079d154 is 50, key is test_row_0/C:col10/1732116114173/Put/seqid=0 2024-11-20T15:21:55,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742021_1197 (size=12101) 2024-11-20T15:21:55,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:55,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116175948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:56,108 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/167c1bbee3d2431998187e1a3079d154 2024-11-20T15:21:56,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/a3d28bfc4a924d929635b656781f83f9 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/a3d28bfc4a924d929635b656781f83f9 2024-11-20T15:21:56,119 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/a3d28bfc4a924d929635b656781f83f9, entries=150, sequenceid=133, filesize=30.3 K 2024-11-20T15:21:56,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/69ad4e6a1bae458b85889f336f40d58b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/69ad4e6a1bae458b85889f336f40d58b 2024-11-20T15:21:56,126 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/69ad4e6a1bae458b85889f336f40d58b, entries=150, sequenceid=133, filesize=11.8 K 2024-11-20T15:21:56,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/167c1bbee3d2431998187e1a3079d154 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/167c1bbee3d2431998187e1a3079d154 2024-11-20T15:21:56,133 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/167c1bbee3d2431998187e1a3079d154, entries=150, sequenceid=133, filesize=11.8 K 2024-11-20T15:21:56,134 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 6e74b15d2fcbe758642cc4f5cf813621 in 899ms, sequenceid=133, compaction requested=true 2024-11-20T15:21:56,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:56,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:56,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-20T15:21:56,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-20T15:21:56,137 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-20T15:21:56,137 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0540 sec 2024-11-20T15:21:56,138 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 1.0590 sec 2024-11-20T15:21:56,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T15:21:56,185 INFO [Thread-779 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-20T15:21:56,188 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:56,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-11-20T15:21:56,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T15:21:56,190 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:56,191 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:56,191 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:56,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T15:21:56,343 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:56,343 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T15:21:56,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:56,343 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T15:21:56,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:21:56,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:56,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:21:56,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:56,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:21:56,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:56,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112016a74d769ac94b2dba8d0d3cd4ad6778_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116115325/Put/seqid=0 2024-11-20T15:21:56,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742022_1198 (size=12304) 
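[Editorial aside, not part of the captured log.] The flush procedures recorded above (pid=57, 59, 61 against table TestAcidGuarantees, families A/B/C) and the write pressure that keeps triggering RegionTooBusyException come from a client workload plus explicit flush requests ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). The sketch below is only a hedged illustration of how such a pattern could be produced with the standard HBase Java client; it is not the test's actual code, and the row value payload and class name are assumptions. The row key, family and qualifier are taken from the log entries above.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();        // reads hbase-site.xml from the classpath
    TableName tn = TableName.valueOf("TestAcidGuarantees");  // table name as seen in the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      // Write a row shaped like the keys in the log (test_row_0, family A, qualifier col10);
      // the value bytes here are an assumption for illustration only.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));
      table.put(put);
      // Ask the master to flush the table; this is what appears above as a
      // FlushTableProcedure with FlushRegionProcedure subprocedures.
      admin.flush(tn);
    }
  }
}
```

On the server side, puts that arrive while the region's memstore is above its blocking size (the "Over memstore limit=512.0 K" in the repeated warnings) are rejected in HRegion.checkResources with RegionTooBusyException; the client normally retries such operations rather than failing immediately, which is why the same callers keep reappearing with new callIds until the flush completes.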
2024-11-20T15:21:56,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:56,378 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112016a74d769ac94b2dba8d0d3cd4ad6778_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112016a74d769ac94b2dba8d0d3cd4ad6778_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:56,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/888d0d3bded34ec7973c1746f0572a2d, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:56,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/888d0d3bded34ec7973c1746f0572a2d is 175, key is test_row_0/A:col10/1732116115325/Put/seqid=0 2024-11-20T15:21:56,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742023_1199 (size=31105) 2024-11-20T15:21:56,388 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=154, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/888d0d3bded34ec7973c1746f0572a2d 2024-11-20T15:21:56,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/c70df523c2e84d29940a00d2d5514cb0 is 50, key is test_row_0/B:col10/1732116115325/Put/seqid=0 2024-11-20T15:21:56,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742024_1200 (size=12151) 2024-11-20T15:21:56,430 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/c70df523c2e84d29940a00d2d5514cb0 2024-11-20T15:21:56,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/f732f2fa31654bd9859ef5828824ea78 is 50, key is test_row_0/C:col10/1732116115325/Put/seqid=0 2024-11-20T15:21:56,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:21:56,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:56,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T15:21:56,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742025_1201 (size=12151) 2024-11-20T15:21:56,500 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/f732f2fa31654bd9859ef5828824ea78 2024-11-20T15:21:56,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/888d0d3bded34ec7973c1746f0572a2d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/888d0d3bded34ec7973c1746f0572a2d 2024-11-20T15:21:56,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:56,518 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/888d0d3bded34ec7973c1746f0572a2d, entries=150, sequenceid=154, filesize=30.4 K 2024-11-20T15:21:56,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116176516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:56,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/c70df523c2e84d29940a00d2d5514cb0 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/c70df523c2e84d29940a00d2d5514cb0 2024-11-20T15:21:56,526 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/c70df523c2e84d29940a00d2d5514cb0, entries=150, sequenceid=154, filesize=11.9 K 2024-11-20T15:21:56,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/f732f2fa31654bd9859ef5828824ea78 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/f732f2fa31654bd9859ef5828824ea78 2024-11-20T15:21:56,534 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/f732f2fa31654bd9859ef5828824ea78, entries=150, sequenceid=154, filesize=11.9 K 2024-11-20T15:21:56,535 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 6e74b15d2fcbe758642cc4f5cf813621 in 192ms, sequenceid=154, 
compaction requested=true 2024-11-20T15:21:56,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:56,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:56,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-11-20T15:21:56,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-11-20T15:21:56,539 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-20T15:21:56,539 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 346 msec 2024-11-20T15:21:56,541 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 351 msec 2024-11-20T15:21:56,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:56,625 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T15:21:56,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:21:56,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:56,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:21:56,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:56,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:21:56,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:56,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206ef0d651eac64da7b9ba749381b2218d_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116116489/Put/seqid=0 2024-11-20T15:21:56,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742026_1202 (size=12304) 2024-11-20T15:21:56,662 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:56,669 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206ef0d651eac64da7b9ba749381b2218d_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206ef0d651eac64da7b9ba749381b2218d_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:56,670 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/86c0057a3df5432b9ba4b79fea69eed1, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:56,671 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/86c0057a3df5432b9ba4b79fea69eed1 is 175, key is test_row_0/A:col10/1732116116489/Put/seqid=0 2024-11-20T15:21:56,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742027_1203 (size=31105) 2024-11-20T15:21:56,688 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=170, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/86c0057a3df5432b9ba4b79fea69eed1 2024-11-20T15:21:56,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:56,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116176689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:56,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/6ca00cebd45746e0af2e6e267ac9ec00 is 50, key is test_row_0/B:col10/1732116116489/Put/seqid=0 2024-11-20T15:21:56,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742028_1204 (size=12151) 2024-11-20T15:21:56,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T15:21:56,793 INFO [Thread-779 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-20T15:21:56,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:56,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-11-20T15:21:56,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:56,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116176794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:56,797 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:56,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T15:21:56,798 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:56,798 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:56,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:56,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116176872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:56,877 DEBUG [Thread-771 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., hostname=0b62285ead89,33387,1732116069954, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:21:56,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:56,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116176879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:56,880 DEBUG [Thread-773 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4146 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., hostname=0b62285ead89,33387,1732116069954, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:21:56,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:56,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116176881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:56,887 DEBUG [Thread-775 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., hostname=0b62285ead89,33387,1732116069954, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:21:56,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:56,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116176888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:56,889 DEBUG [Thread-769 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4154 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., hostname=0b62285ead89,33387,1732116069954, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:21:56,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T15:21:56,952 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:56,953 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-20T15:21:56,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:56,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:21:56,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:56,953 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:56,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:56,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:57,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:57,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116177001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:57,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T15:21:57,107 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:57,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-20T15:21:57,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:57,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:21:57,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:57,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:57,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:57,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:21:57,114 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/6ca00cebd45746e0af2e6e267ac9ec00 2024-11-20T15:21:57,131 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/a1ed096e1b1045feb5e3f7510087a115 is 50, key is test_row_0/C:col10/1732116116489/Put/seqid=0 2024-11-20T15:21:57,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742029_1205 (size=12151) 2024-11-20T15:21:57,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/a1ed096e1b1045feb5e3f7510087a115 2024-11-20T15:21:57,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/86c0057a3df5432b9ba4b79fea69eed1 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/86c0057a3df5432b9ba4b79fea69eed1 2024-11-20T15:21:57,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/86c0057a3df5432b9ba4b79fea69eed1, entries=150, sequenceid=170, filesize=30.4 K 2024-11-20T15:21:57,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/6ca00cebd45746e0af2e6e267ac9ec00 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/6ca00cebd45746e0af2e6e267ac9ec00 2024-11-20T15:21:57,156 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/6ca00cebd45746e0af2e6e267ac9ec00, entries=150, sequenceid=170, filesize=11.9 K 2024-11-20T15:21:57,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/a1ed096e1b1045feb5e3f7510087a115 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/a1ed096e1b1045feb5e3f7510087a115 2024-11-20T15:21:57,162 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/a1ed096e1b1045feb5e3f7510087a115, entries=150, sequenceid=170, filesize=11.9 K 2024-11-20T15:21:57,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 6e74b15d2fcbe758642cc4f5cf813621 in 539ms, sequenceid=170, compaction requested=true 2024-11-20T15:21:57,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:57,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:21:57,164 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T15:21:57,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:57,164 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T15:21:57,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:21:57,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:57,164 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:21:57,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:57,166 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 155381 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T15:21:57,167 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/A is initiating minor compaction (all files) 2024-11-20T15:21:57,167 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/A in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:57,167 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/16fea317fd7d42b3a8540ec28090d00c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/d9aa355e30814b80acf0df0081e69f95, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/a3d28bfc4a924d929635b656781f83f9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/888d0d3bded34ec7973c1746f0572a2d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/86c0057a3df5432b9ba4b79fea69eed1] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=151.7 K 2024-11-20T15:21:57,167 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:57,167 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/16fea317fd7d42b3a8540ec28090d00c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/d9aa355e30814b80acf0df0081e69f95, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/a3d28bfc4a924d929635b656781f83f9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/888d0d3bded34ec7973c1746f0572a2d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/86c0057a3df5432b9ba4b79fea69eed1] 2024-11-20T15:21:57,167 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60611 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T15:21:57,167 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/B is initiating minor compaction (all files) 2024-11-20T15:21:57,167 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16fea317fd7d42b3a8540ec28090d00c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732116112731 2024-11-20T15:21:57,167 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/B in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:21:57,168 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/6551b5b064ec4bd3946b484d642a9c47, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/dfc21d08c27245a1bd06e6e9fea34479, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/69ad4e6a1bae458b85889f336f40d58b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/c70df523c2e84d29940a00d2d5514cb0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/6ca00cebd45746e0af2e6e267ac9ec00] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=59.2 K 2024-11-20T15:21:57,168 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9aa355e30814b80acf0df0081e69f95, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732116113802 2024-11-20T15:21:57,168 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6551b5b064ec4bd3946b484d642a9c47, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732116112731 2024-11-20T15:21:57,169 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3d28bfc4a924d929635b656781f83f9, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732116114173 2024-11-20T15:21:57,169 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting dfc21d08c27245a1bd06e6e9fea34479, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732116113802 2024-11-20T15:21:57,169 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 69ad4e6a1bae458b85889f336f40d58b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732116114173 2024-11-20T15:21:57,169 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 888d0d3bded34ec7973c1746f0572a2d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732116115325 2024-11-20T15:21:57,170 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting c70df523c2e84d29940a00d2d5514cb0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732116115325 2024-11-20T15:21:57,170 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86c0057a3df5432b9ba4b79fea69eed1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732116116489 2024-11-20T15:21:57,170 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ca00cebd45746e0af2e6e267ac9ec00, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732116116489 2024-11-20T15:21:57,180 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:57,192 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#B#compaction#178 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:57,193 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/7e493d94736b48369c18a3ff8e5e82f7 is 50, key is test_row_0/B:col10/1732116116489/Put/seqid=0 2024-11-20T15:21:57,194 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120e64c54357b094b8baa505780e51258b5_6e74b15d2fcbe758642cc4f5cf813621 store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:57,197 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120e64c54357b094b8baa505780e51258b5_6e74b15d2fcbe758642cc4f5cf813621, store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:57,197 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e64c54357b094b8baa505780e51258b5_6e74b15d2fcbe758642cc4f5cf813621 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:57,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742030_1206 (size=12527) 2024-11-20T15:21:57,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742031_1207 (size=4469) 2024-11-20T15:21:57,209 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#A#compaction#177 average throughput is 0.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:57,209 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/7d4deeb8ba6a49489dcd3453280dc718 is 175, key is test_row_0/A:col10/1732116116489/Put/seqid=0 2024-11-20T15:21:57,210 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/7e493d94736b48369c18a3ff8e5e82f7 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/7e493d94736b48369c18a3ff8e5e82f7 2024-11-20T15:21:57,216 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/B of 6e74b15d2fcbe758642cc4f5cf813621 into 7e493d94736b48369c18a3ff8e5e82f7(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:57,216 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:57,216 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/B, priority=11, startTime=1732116117164; duration=0sec 2024-11-20T15:21:57,216 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:21:57,216 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:B 2024-11-20T15:21:57,216 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T15:21:57,218 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60611 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T15:21:57,218 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/C is initiating minor compaction (all files) 2024-11-20T15:21:57,218 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/C in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:21:57,218 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/6c16f73ba2ba40c08ca03628bca6b784, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/fb54bea9823543d996341075bcb0a012, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/167c1bbee3d2431998187e1a3079d154, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/f732f2fa31654bd9859ef5828824ea78, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/a1ed096e1b1045feb5e3f7510087a115] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=59.2 K 2024-11-20T15:21:57,218 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c16f73ba2ba40c08ca03628bca6b784, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732116112731 2024-11-20T15:21:57,219 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting fb54bea9823543d996341075bcb0a012, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732116113802 2024-11-20T15:21:57,219 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 167c1bbee3d2431998187e1a3079d154, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732116114173 2024-11-20T15:21:57,220 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting f732f2fa31654bd9859ef5828824ea78, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732116115325 2024-11-20T15:21:57,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742032_1208 (size=31481) 2024-11-20T15:21:57,221 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting a1ed096e1b1045feb5e3f7510087a115, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732116116489 2024-11-20T15:21:57,226 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/7d4deeb8ba6a49489dcd3453280dc718 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/7d4deeb8ba6a49489dcd3453280dc718 2024-11-20T15:21:57,234 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/A of 6e74b15d2fcbe758642cc4f5cf813621 into 7d4deeb8ba6a49489dcd3453280dc718(size=30.7 K), total size for store is 30.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:21:57,234 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:57,234 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/A, priority=11, startTime=1732116117164; duration=0sec 2024-11-20T15:21:57,235 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:57,235 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:A 2024-11-20T15:21:57,247 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#C#compaction#179 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:21:57,248 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/6ccc06ffce16481e958d94022e74bf76 is 50, key is test_row_0/C:col10/1732116116489/Put/seqid=0 2024-11-20T15:21:57,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742033_1209 (size=12527) 2024-11-20T15:21:57,262 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:57,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-20T15:21:57,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:21:57,263 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T15:21:57,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:21:57,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:57,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:21:57,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:57,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:21:57,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:57,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202cbc857d22184708b9794c8baa6b68ab_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116116679/Put/seqid=0 2024-11-20T15:21:57,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742034_1210 (size=12304) 2024-11-20T15:21:57,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:57,293 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202cbc857d22184708b9794c8baa6b68ab_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202cbc857d22184708b9794c8baa6b68ab_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:57,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/0bc1d78a18aa4b77a588f96307b7feb7, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:57,295 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/0bc1d78a18aa4b77a588f96307b7feb7 is 175, key is test_row_0/A:col10/1732116116679/Put/seqid=0 2024-11-20T15:21:57,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742035_1211 (size=31105) 2024-11-20T15:21:57,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:57,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:21:57,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:57,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116177337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:57,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T15:21:57,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:57,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116177441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:57,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:57,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116177642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:57,661 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/6ccc06ffce16481e958d94022e74bf76 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/6ccc06ffce16481e958d94022e74bf76 2024-11-20T15:21:57,667 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/C of 6e74b15d2fcbe758642cc4f5cf813621 into 6ccc06ffce16481e958d94022e74bf76(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:21:57,667 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:57,667 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/C, priority=11, startTime=1732116117164; duration=0sec 2024-11-20T15:21:57,668 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:21:57,668 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:C 2024-11-20T15:21:57,701 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=192, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/0bc1d78a18aa4b77a588f96307b7feb7 2024-11-20T15:21:57,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/d1d76b7118664216b63c3d9577a335a3 is 50, key is test_row_0/B:col10/1732116116679/Put/seqid=0 2024-11-20T15:21:57,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742036_1212 (size=12151) 2024-11-20T15:21:57,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T15:21:57,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:57,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116177948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:58,118 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/d1d76b7118664216b63c3d9577a335a3 2024-11-20T15:21:58,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/1b4955a9922045478b89ddfd53c5871e is 50, key is test_row_0/C:col10/1732116116679/Put/seqid=0 2024-11-20T15:21:58,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742037_1213 (size=12151) 2024-11-20T15:21:58,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:58,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116178452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:58,543 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/1b4955a9922045478b89ddfd53c5871e 2024-11-20T15:21:58,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/0bc1d78a18aa4b77a588f96307b7feb7 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/0bc1d78a18aa4b77a588f96307b7feb7 2024-11-20T15:21:58,557 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/0bc1d78a18aa4b77a588f96307b7feb7, entries=150, sequenceid=192, filesize=30.4 K 2024-11-20T15:21:58,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/d1d76b7118664216b63c3d9577a335a3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/d1d76b7118664216b63c3d9577a335a3 2024-11-20T15:21:58,564 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/d1d76b7118664216b63c3d9577a335a3, entries=150, sequenceid=192, filesize=11.9 K 2024-11-20T15:21:58,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/1b4955a9922045478b89ddfd53c5871e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/1b4955a9922045478b89ddfd53c5871e 2024-11-20T15:21:58,572 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/1b4955a9922045478b89ddfd53c5871e, entries=150, sequenceid=192, filesize=11.9 K 2024-11-20T15:21:58,576 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 6e74b15d2fcbe758642cc4f5cf813621 in 1314ms, sequenceid=192, compaction requested=false 2024-11-20T15:21:58,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:21:58,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:58,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-11-20T15:21:58,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-11-20T15:21:58,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-20T15:21:58,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7800 sec 2024-11-20T15:21:58,581 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 1.7850 sec 2024-11-20T15:21:58,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T15:21:58,902 INFO [Thread-779 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-20T15:21:58,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:21:58,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees 2024-11-20T15:21:58,906 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:21:58,906 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T15:21:58,908 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:21:58,908 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:21:59,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T15:21:59,060 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:21:59,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-20T15:21:59,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:21:59,061 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T15:21:59,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:21:59,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:59,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:21:59,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:59,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:21:59,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:21:59,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120eae6ad0db24f4291bd6c209a9faca2d0_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116117334/Put/seqid=0 2024-11-20T15:21:59,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742038_1214 
(size=12304) 2024-11-20T15:21:59,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T15:21:59,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:59,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:21:59,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:21:59,479 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120eae6ad0db24f4291bd6c209a9faca2d0_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120eae6ad0db24f4291bd6c209a9faca2d0_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:21:59,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/2026d6e5849a44dc8e0ba562a4a19af6, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:21:59,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/2026d6e5849a44dc8e0ba562a4a19af6 is 175, key is test_row_0/A:col10/1732116117334/Put/seqid=0 2024-11-20T15:21:59,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742039_1215 (size=31105) 2024-11-20T15:21:59,488 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=209, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/2026d6e5849a44dc8e0ba562a4a19af6 2024-11-20T15:21:59,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/b64b2e48bde04a5a8f46c78f0eae5d11 is 50, key is test_row_0/B:col10/1732116117334/Put/seqid=0 2024-11-20T15:21:59,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742040_1216 (size=12151) 
2024-11-20T15:21:59,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T15:21:59,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:59,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116179510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:59,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:59,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116179612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:59,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:21:59,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116179816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:21:59,906 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/b64b2e48bde04a5a8f46c78f0eae5d11 2024-11-20T15:21:59,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/d4c3b85b721d41f29cff6bc1416b948b is 50, key is test_row_0/C:col10/1732116117334/Put/seqid=0 2024-11-20T15:21:59,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742041_1217 (size=12151) 2024-11-20T15:22:00,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T15:22:00,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:00,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116180119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:00,352 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/d4c3b85b721d41f29cff6bc1416b948b 2024-11-20T15:22:00,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/2026d6e5849a44dc8e0ba562a4a19af6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/2026d6e5849a44dc8e0ba562a4a19af6 2024-11-20T15:22:00,362 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/2026d6e5849a44dc8e0ba562a4a19af6, entries=150, sequenceid=209, filesize=30.4 K 2024-11-20T15:22:00,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/b64b2e48bde04a5a8f46c78f0eae5d11 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/b64b2e48bde04a5a8f46c78f0eae5d11 2024-11-20T15:22:00,368 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/b64b2e48bde04a5a8f46c78f0eae5d11, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T15:22:00,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/d4c3b85b721d41f29cff6bc1416b948b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/d4c3b85b721d41f29cff6bc1416b948b 2024-11-20T15:22:00,375 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/d4c3b85b721d41f29cff6bc1416b948b, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T15:22:00,376 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 6e74b15d2fcbe758642cc4f5cf813621 in 1315ms, sequenceid=209, compaction requested=true 2024-11-20T15:22:00,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:00,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:00,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=66 2024-11-20T15:22:00,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=66 2024-11-20T15:22:00,379 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-20T15:22:00,379 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4700 sec 2024-11-20T15:22:00,381 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees in 1.4760 sec 2024-11-20T15:22:00,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:00,628 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T15:22:00,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:22:00,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:00,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:22:00,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:00,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:22:00,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:00,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202f9564dd17a341ab9d385c13f77707b6_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116119477/Put/seqid=0 2024-11-20T15:22:00,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742042_1218 (size=12304) 2024-11-20T15:22:00,664 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:00,670 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202f9564dd17a341ab9d385c13f77707b6_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202f9564dd17a341ab9d385c13f77707b6_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:00,672 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/56dec9975c9740ae83b9eb80a559c810, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:00,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/56dec9975c9740ae83b9eb80a559c810 is 175, key is test_row_0/A:col10/1732116119477/Put/seqid=0 2024-11-20T15:22:00,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:00,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116180676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:00,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742043_1219 (size=31105) 2024-11-20T15:22:00,689 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=230, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/56dec9975c9740ae83b9eb80a559c810 2024-11-20T15:22:00,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/33360f8b9b6f4965963105120e200617 is 50, key is test_row_0/B:col10/1732116119477/Put/seqid=0 2024-11-20T15:22:00,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742044_1220 (size=12151) 2024-11-20T15:22:00,719 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/33360f8b9b6f4965963105120e200617 2024-11-20T15:22:00,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/f605331e0e874811aae067ce1c92ce32 is 50, key is test_row_0/C:col10/1732116119477/Put/seqid=0 2024-11-20T15:22:00,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742045_1221 (size=12151) 2024-11-20T15:22:00,741 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/f605331e0e874811aae067ce1c92ce32 2024-11-20T15:22:00,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/56dec9975c9740ae83b9eb80a559c810 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/56dec9975c9740ae83b9eb80a559c810 2024-11-20T15:22:00,753 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/56dec9975c9740ae83b9eb80a559c810, entries=150, sequenceid=230, filesize=30.4 K 2024-11-20T15:22:00,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/33360f8b9b6f4965963105120e200617 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/33360f8b9b6f4965963105120e200617 2024-11-20T15:22:00,758 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/33360f8b9b6f4965963105120e200617, entries=150, sequenceid=230, filesize=11.9 K 2024-11-20T15:22:00,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/f605331e0e874811aae067ce1c92ce32 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/f605331e0e874811aae067ce1c92ce32 2024-11-20T15:22:00,765 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/f605331e0e874811aae067ce1c92ce32, entries=150, sequenceid=230, filesize=11.9 K 2024-11-20T15:22:00,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 6e74b15d2fcbe758642cc4f5cf813621 in 138ms, sequenceid=230, compaction requested=true 2024-11-20T15:22:00,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:00,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:00,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:00,767 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:00,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:B, priority=-2147483648, current under compaction store 
size is 2 2024-11-20T15:22:00,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:00,767 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:00,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:00,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:00,768 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124796 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:00,768 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:00,768 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/A is initiating minor compaction (all files) 2024-11-20T15:22:00,769 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/B is initiating minor compaction (all files) 2024-11-20T15:22:00,769 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/A in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:00,769 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/B in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:22:00,769 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/7e493d94736b48369c18a3ff8e5e82f7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/d1d76b7118664216b63c3d9577a335a3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/b64b2e48bde04a5a8f46c78f0eae5d11, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/33360f8b9b6f4965963105120e200617] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=47.8 K 2024-11-20T15:22:00,769 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/7d4deeb8ba6a49489dcd3453280dc718, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/0bc1d78a18aa4b77a588f96307b7feb7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/2026d6e5849a44dc8e0ba562a4a19af6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/56dec9975c9740ae83b9eb80a559c810] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=121.9 K 2024-11-20T15:22:00,769 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:00,769 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/7d4deeb8ba6a49489dcd3453280dc718, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/0bc1d78a18aa4b77a588f96307b7feb7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/2026d6e5849a44dc8e0ba562a4a19af6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/56dec9975c9740ae83b9eb80a559c810] 2024-11-20T15:22:00,770 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e493d94736b48369c18a3ff8e5e82f7, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732116116489 2024-11-20T15:22:00,770 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d4deeb8ba6a49489dcd3453280dc718, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732116116489 2024-11-20T15:22:00,770 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0bc1d78a18aa4b77a588f96307b7feb7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1732116116679 2024-11-20T15:22:00,770 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting d1d76b7118664216b63c3d9577a335a3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1732116116679 2024-11-20T15:22:00,770 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b64b2e48bde04a5a8f46c78f0eae5d11, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732116117331 2024-11-20T15:22:00,770 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2026d6e5849a44dc8e0ba562a4a19af6, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732116117331 2024-11-20T15:22:00,771 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 33360f8b9b6f4965963105120e200617, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1732116119477 2024-11-20T15:22:00,771 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56dec9975c9740ae83b9eb80a559c810, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1732116119477 2024-11-20T15:22:00,780 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:00,787 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#B#compaction#190 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:00,787 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/51cd20524bc24ecaadce948884513108 is 50, key is test_row_0/B:col10/1732116119477/Put/seqid=0 2024-11-20T15:22:00,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:00,788 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T15:22:00,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:22:00,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:00,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:22:00,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:00,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:22:00,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:00,797 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120dfaa4c3eeff6413cabba42697f3c2b02_6e74b15d2fcbe758642cc4f5cf813621 store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:00,799 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120dfaa4c3eeff6413cabba42697f3c2b02_6e74b15d2fcbe758642cc4f5cf813621, store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:00,800 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dfaa4c3eeff6413cabba42697f3c2b02_6e74b15d2fcbe758642cc4f5cf813621 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:00,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742047_1223 (size=4469) 2024-11-20T15:22:00,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742046_1222 (size=12663) 2024-11-20T15:22:00,866 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ebe08a856e224cefafd29359969263e2_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116120665/Put/seqid=0 2024-11-20T15:22:00,874 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742048_1224 (size=14794) 2024-11-20T15:22:00,874 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:00,879 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ebe08a856e224cefafd29359969263e2_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ebe08a856e224cefafd29359969263e2_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:00,881 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/18d3c15711914234a42c6cc1b0421b16, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:00,881 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/18d3c15711914234a42c6cc1b0421b16 is 175, key is test_row_0/A:col10/1732116120665/Put/seqid=0 2024-11-20T15:22:00,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:00,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116180877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:00,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:00,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58982 deadline: 1732116180888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:00,893 DEBUG [Thread-771 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8161 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., hostname=0b62285ead89,33387,1732116069954, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:22:00,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742049_1225 (size=39749) 2024-11-20T15:22:00,903 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=246, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/18d3c15711914234a42c6cc1b0421b16 2024-11-20T15:22:00,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:00,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58974 deadline: 1732116180899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:00,906 DEBUG [Thread-773 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8172 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., hostname=0b62285ead89,33387,1732116069954, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at 
org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:22:00,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:00,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58994 deadline: 1732116180913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:00,917 DEBUG [Thread-775 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8181 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., hostname=0b62285ead89,33387,1732116069954, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:22:00,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:00,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/e51546b10a814d31919e6671623a8d49 is 50, key is test_row_0/B:col10/1732116120665/Put/seqid=0 2024-11-20T15:22:00,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58948 deadline: 1732116180919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:00,922 DEBUG [Thread-769 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8187 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., hostname=0b62285ead89,33387,1732116069954, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:22:00,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742050_1226 (size=12151) 2024-11-20T15:22:00,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:00,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116180986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:01,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T15:22:01,012 INFO [Thread-779 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-11-20T15:22:01,013 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:01,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-11-20T15:22:01,015 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:01,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T15:22:01,016 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:01,016 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:01,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T15:22:01,168 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:01,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T15:22:01,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:22:01,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:22:01,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:01,169 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:01,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:01,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:01,191 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:01,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116181190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:01,260 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#A#compaction#189 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:01,261 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/6721c44fd03a45d29661d2bdf59876ce is 175, key is test_row_0/A:col10/1732116119477/Put/seqid=0 2024-11-20T15:22:01,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742051_1227 (size=31617) 2024-11-20T15:22:01,272 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/51cd20524bc24ecaadce948884513108 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/51cd20524bc24ecaadce948884513108 2024-11-20T15:22:01,280 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/B of 6e74b15d2fcbe758642cc4f5cf813621 into 51cd20524bc24ecaadce948884513108(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:01,280 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:01,280 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/B, priority=12, startTime=1732116120767; duration=0sec 2024-11-20T15:22:01,280 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:01,280 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:B 2024-11-20T15:22:01,280 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:01,283 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:01,283 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/C is initiating minor compaction (all files) 2024-11-20T15:22:01,283 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/C in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:01,283 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/6ccc06ffce16481e958d94022e74bf76, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/1b4955a9922045478b89ddfd53c5871e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/d4c3b85b721d41f29cff6bc1416b948b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/f605331e0e874811aae067ce1c92ce32] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=47.8 K 2024-11-20T15:22:01,284 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ccc06ffce16481e958d94022e74bf76, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732116116489 2024-11-20T15:22:01,284 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b4955a9922045478b89ddfd53c5871e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1732116116679 2024-11-20T15:22:01,285 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting d4c3b85b721d41f29cff6bc1416b948b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=209, earliestPutTs=1732116117331 2024-11-20T15:22:01,285 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting f605331e0e874811aae067ce1c92ce32, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1732116119477 2024-11-20T15:22:01,296 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#C#compaction#193 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:01,297 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/5363fefa8c274a19b4d4fd39a3455002 is 50, key is test_row_0/C:col10/1732116119477/Put/seqid=0 2024-11-20T15:22:01,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742052_1228 (size=12663) 2024-11-20T15:22:01,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T15:22:01,322 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:01,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T15:22:01,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:01,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:22:01,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:01,323 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:01,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:01,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:01,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/e51546b10a814d31919e6671623a8d49 2024-11-20T15:22:01,339 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/d1fa9b5f76a948439adf390a0f5f1ded is 50, key is test_row_0/C:col10/1732116120665/Put/seqid=0 2024-11-20T15:22:01,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742053_1229 (size=12151) 2024-11-20T15:22:01,475 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:01,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T15:22:01,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:01,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:22:01,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:01,476 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:01,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:01,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:01,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:01,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116181494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:01,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T15:22:01,628 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:01,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T15:22:01,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
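Editor's note: the RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the regionserver pushing back on writers while flushes and compactions catch up. The sketch below is a minimal, illustrative client-side writer that backs off and retries on that exception; the table, row, family, and qualifier names are taken from this log, but the retry budget and sleep times are assumed example values, and in a default configuration the HBase client already retries this exception internally, so the explicit loop is purely for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long sleepMs = 100;                       // assumed starting backoff, not from this run
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                       // write accepted by the regionserver
          break;
        } catch (RegionTooBusyException busy) { // region over its memstore limit, as in the log
          if (attempt == 5) {
            throw busy;                         // give up after the assumed retry budget
          }
          Thread.sleep(sleepMs);                // let the in-flight flush/compaction drain
          sleepMs *= 2;                         // exponential backoff
        }
      }
    }
  }
}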
2024-11-20T15:22:01,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:22:01,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:01,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:01,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:01,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:01,673 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/6721c44fd03a45d29661d2bdf59876ce as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/6721c44fd03a45d29661d2bdf59876ce 2024-11-20T15:22:01,678 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/A of 6e74b15d2fcbe758642cc4f5cf813621 into 6721c44fd03a45d29661d2bdf59876ce(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
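Editor's note: the "Committing .../.tmp/... as .../A/..." lines above show compaction and flush output being written under a temporary directory and only then moved into the live column-family directory. The sketch below illustrates that generic write-to-temp-then-rename pattern against the Hadoop FileSystem API; the paths echo the log but are hardcoded placeholders, and this is not the HRegionFileSystem or HFile-writing code itself.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRename {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Write the new file under a .tmp directory first (illustrative paths only).
    Path tmp = new Path("/data/default/TestAcidGuarantees/region/.tmp/A/newfile");
    Path dst = new Path("/data/default/TestAcidGuarantees/region/A/newfile");
    try (FSDataOutputStream out = fs.create(tmp)) {
      out.writeBytes("payload");
    }

    // Only after the write succeeds is the file moved into the store directory,
    // so readers never observe a partially written store file.
    fs.mkdirs(dst.getParent());
    if (!fs.rename(tmp, dst)) {
      throw new IOException("commit failed for " + tmp);
    }
  }
}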
2024-11-20T15:22:01,678 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:01,678 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/A, priority=12, startTime=1732116120767; duration=0sec 2024-11-20T15:22:01,679 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:01,679 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:A 2024-11-20T15:22:01,711 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/5363fefa8c274a19b4d4fd39a3455002 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/5363fefa8c274a19b4d4fd39a3455002 2024-11-20T15:22:01,717 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/C of 6e74b15d2fcbe758642cc4f5cf813621 into 5363fefa8c274a19b4d4fd39a3455002(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:01,717 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:01,717 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/C, priority=12, startTime=1732116120767; duration=0sec 2024-11-20T15:22:01,717 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:01,717 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:C 2024-11-20T15:22:01,744 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/d1fa9b5f76a948439adf390a0f5f1ded 2024-11-20T15:22:01,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/18d3c15711914234a42c6cc1b0421b16 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/18d3c15711914234a42c6cc1b0421b16 2024-11-20T15:22:01,754 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/18d3c15711914234a42c6cc1b0421b16, entries=200, sequenceid=246, filesize=38.8 K 2024-11-20T15:22:01,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/e51546b10a814d31919e6671623a8d49 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/e51546b10a814d31919e6671623a8d49 2024-11-20T15:22:01,760 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/e51546b10a814d31919e6671623a8d49, entries=150, sequenceid=246, filesize=11.9 K 2024-11-20T15:22:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/d1fa9b5f76a948439adf390a0f5f1ded as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/d1fa9b5f76a948439adf390a0f5f1ded 2024-11-20T15:22:01,765 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/d1fa9b5f76a948439adf390a0f5f1ded, entries=150, sequenceid=246, filesize=11.9 K 2024-11-20T15:22:01,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 6e74b15d2fcbe758642cc4f5cf813621 in 978ms, sequenceid=246, compaction requested=false 2024-11-20T15:22:01,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:01,781 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:01,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T15:22:01,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
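Editor's note: the flush entries that follow route column family A through mob.DefaultMobStoreFlusher and park cell data under the mobdir area, which is the behaviour of a MOB-enabled family. Below is a hedged sketch of declaring such a family at table-creation time; only family A is shown (the test table also has B and C), and the 100 KB threshold is an assumed example value, not one read from this test's configuration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Family "A" is MOB-enabled: cells larger than the threshold are stored in the mob area.
      ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100 * 1024L)   // assumed 100 KB threshold for illustration
          .build();
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setColumnFamily(mobFamily)
          .build());
    }
  }
}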
2024-11-20T15:22:01,782 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T15:22:01,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:22:01,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:01,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:22:01,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:01,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:22:01,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:01,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203e070a91001b4bf08df4468a1feef0b9_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116120848/Put/seqid=0 2024-11-20T15:22:01,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742054_1230 (size=12404) 2024-11-20T15:22:02,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:02,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:22:02,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:02,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116182027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:02,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T15:22:02,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:02,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116182132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:02,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:02,199 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203e070a91001b4bf08df4468a1feef0b9_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203e070a91001b4bf08df4468a1feef0b9_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:02,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/5b5855a0e5cb40d19fdff5e737b43ebd, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:02,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/5b5855a0e5cb40d19fdff5e737b43ebd is 175, key is test_row_0/A:col10/1732116120848/Put/seqid=0 2024-11-20T15:22:02,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742055_1231 (size=31205) 2024-11-20T15:22:02,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:02,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116182333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:02,605 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=269, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/5b5855a0e5cb40d19fdff5e737b43ebd 2024-11-20T15:22:02,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/c5299258c235490285382b40e8312864 is 50, key is test_row_0/B:col10/1732116120848/Put/seqid=0 2024-11-20T15:22:02,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742056_1232 (size=12251) 2024-11-20T15:22:02,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:02,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116182634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:03,019 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/c5299258c235490285382b40e8312864 2024-11-20T15:22:03,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/ebb9e112046b4f6f8a682d7a2ae4ed94 is 50, key is test_row_0/C:col10/1732116120848/Put/seqid=0 2024-11-20T15:22:03,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742057_1233 (size=12251) 2024-11-20T15:22:03,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T15:22:03,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:03,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116183139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:03,441 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/ebb9e112046b4f6f8a682d7a2ae4ed94 2024-11-20T15:22:03,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/5b5855a0e5cb40d19fdff5e737b43ebd as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/5b5855a0e5cb40d19fdff5e737b43ebd 2024-11-20T15:22:03,451 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/5b5855a0e5cb40d19fdff5e737b43ebd, entries=150, sequenceid=269, filesize=30.5 K 2024-11-20T15:22:03,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/c5299258c235490285382b40e8312864 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/c5299258c235490285382b40e8312864 2024-11-20T15:22:03,456 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/c5299258c235490285382b40e8312864, entries=150, sequenceid=269, filesize=12.0 K 2024-11-20T15:22:03,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-20T15:22:03,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/ebb9e112046b4f6f8a682d7a2ae4ed94 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/ebb9e112046b4f6f8a682d7a2ae4ed94 2024-11-20T15:22:03,462 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/ebb9e112046b4f6f8a682d7a2ae4ed94, entries=150, sequenceid=269, filesize=12.0 K 2024-11-20T15:22:03,463 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 6e74b15d2fcbe758642cc4f5cf813621 in 1681ms, sequenceid=269, compaction requested=true 2024-11-20T15:22:03,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:03,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:03,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-11-20T15:22:03,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-11-20T15:22:03,466 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-20T15:22:03,466 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4480 sec 2024-11-20T15:22:03,467 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 2.4530 sec 2024-11-20T15:22:04,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:04,152 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T15:22:04,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:22:04,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:04,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:22:04,153 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:04,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:22:04,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:04,160 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120901386c19e534ef6a973691dd0b4e8a6_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116122026/Put/seqid=0 2024-11-20T15:22:04,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742058_1234 (size=14994) 2024-11-20T15:22:04,170 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:04,174 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120901386c19e534ef6a973691dd0b4e8a6_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120901386c19e534ef6a973691dd0b4e8a6_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:04,175 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/285b1029a02c488b85871edf0fcf3e25, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:04,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/285b1029a02c488b85871edf0fcf3e25 is 175, key is test_row_0/A:col10/1732116122026/Put/seqid=0 2024-11-20T15:22:04,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742059_1235 (size=39949) 2024-11-20T15:22:04,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:04,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116184205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:04,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:04,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116184309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:04,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:04,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116184513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:04,581 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=286, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/285b1029a02c488b85871edf0fcf3e25 2024-11-20T15:22:04,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/dc0904a8fba44585ae5d65238b860085 is 50, key is test_row_0/B:col10/1732116122026/Put/seqid=0 2024-11-20T15:22:04,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742060_1236 (size=12301) 2024-11-20T15:22:04,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:04,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116184817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:04,997 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/dc0904a8fba44585ae5d65238b860085 2024-11-20T15:22:05,004 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/e317d80acb6548539815b497a4157f75 is 50, key is test_row_0/C:col10/1732116122026/Put/seqid=0 2024-11-20T15:22:05,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742061_1237 (size=12301) 2024-11-20T15:22:05,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T15:22:05,121 INFO [Thread-779 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-20T15:22:05,122 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:05,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-11-20T15:22:05,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T15:22:05,124 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:05,124 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:05,124 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
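The Mutate rejections above (callId 273 through 279) are the server-side view of a writer being pushed back: HRegion.checkResources throws RegionTooBusyException once the region's memstore passes its blocking limit (512.0 K in this test) and RSRpcServices.mutate returns that exception to the client, which is expected to back off and retry. A minimal client-side sketch of such a writer follows; the table, row, family and qualifier names are taken from the log, while the value, retry count and backoff are illustrative assumptions, and whether the server's RegionTooBusyException surfaces directly or wrapped depends on the client's retry settings.

// Hedged sketch of a writer in the spirit of the TestAcidGuarantees load threads whose
// Mutate calls are rejected above. Value, retry count and backoff are assumptions.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put); // server path: RSRpcServices.mutate -> HRegion.put -> checkResources
          break;
        } catch (IOException e) {
          // In this log the server answers with RegionTooBusyException
          // ("Over memstore limit=512.0 K"); back off instead of hammering the region.
          Thread.sleep(200L * attempt);
        }
      }
    }
  }
}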
2024-11-20T15:22:05,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T15:22:05,276 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:05,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T15:22:05,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:05,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:22:05,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:05,277 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:05,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:05,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:05,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:05,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116185321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:05,409 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/e317d80acb6548539815b497a4157f75 2024-11-20T15:22:05,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/285b1029a02c488b85871edf0fcf3e25 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/285b1029a02c488b85871edf0fcf3e25 2024-11-20T15:22:05,425 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/285b1029a02c488b85871edf0fcf3e25, entries=200, sequenceid=286, filesize=39.0 K 2024-11-20T15:22:05,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T15:22:05,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/dc0904a8fba44585ae5d65238b860085 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/dc0904a8fba44585ae5d65238b860085 2024-11-20T15:22:05,429 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:05,430 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T15:22:05,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:22:05,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:22:05,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:05,430 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:05,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:05,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
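The pid=70 failures above are not data errors: FlushRegionCallable finds the region "already flushing" (the MemStoreFlusher started before the procedure arrived), reports "Unable to complete flush" back to the master, and the master re-dispatches the callable until the in-flight flush drains; pid=70 eventually completes at 15:22:06,873. The flush itself was requested through the admin API, as the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" and HBaseAdmin$TableFuture lines show. A minimal sketch of that request is below; connection setup and error handling are assumptions, only the table name comes from the log.

// Hedged sketch of the client-side flush request behind FlushTableProcedure pid=67/69.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequester {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush for every region of the table; the call returns once the
      // master-side procedure reports completion (cf. the HBaseAdmin$TableFuture line above).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}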
2024-11-20T15:22:05,432 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/dc0904a8fba44585ae5d65238b860085, entries=150, sequenceid=286, filesize=12.0 K 2024-11-20T15:22:05,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/e317d80acb6548539815b497a4157f75 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/e317d80acb6548539815b497a4157f75 2024-11-20T15:22:05,441 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/e317d80acb6548539815b497a4157f75, entries=150, sequenceid=286, filesize=12.0 K 2024-11-20T15:22:05,442 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 6e74b15d2fcbe758642cc4f5cf813621 in 1290ms, sequenceid=286, compaction requested=true 2024-11-20T15:22:05,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:05,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:05,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:05,443 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:05,443 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:05,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:05,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:05,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:05,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:05,444 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142520 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:05,444 DEBUG 
[RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/A is initiating minor compaction (all files) 2024-11-20T15:22:05,444 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/A in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:05,445 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/6721c44fd03a45d29661d2bdf59876ce, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/18d3c15711914234a42c6cc1b0421b16, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/5b5855a0e5cb40d19fdff5e737b43ebd, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/285b1029a02c488b85871edf0fcf3e25] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=139.2 K 2024-11-20T15:22:05,445 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:05,445 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/6721c44fd03a45d29661d2bdf59876ce, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/18d3c15711914234a42c6cc1b0421b16, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/5b5855a0e5cb40d19fdff5e737b43ebd, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/285b1029a02c488b85871edf0fcf3e25] 2024-11-20T15:22:05,445 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49366 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:05,445 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/B is initiating minor compaction (all files) 2024-11-20T15:22:05,445 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/B in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:22:05,446 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/51cd20524bc24ecaadce948884513108, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/e51546b10a814d31919e6671623a8d49, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/c5299258c235490285382b40e8312864, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/dc0904a8fba44585ae5d65238b860085] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=48.2 K 2024-11-20T15:22:05,446 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6721c44fd03a45d29661d2bdf59876ce, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1732116119477 2024-11-20T15:22:05,446 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 51cd20524bc24ecaadce948884513108, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1732116119477 2024-11-20T15:22:05,446 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18d3c15711914234a42c6cc1b0421b16, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732116120641 2024-11-20T15:22:05,447 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting e51546b10a814d31919e6671623a8d49, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732116120641 2024-11-20T15:22:05,447 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b5855a0e5cb40d19fdff5e737b43ebd, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1732116120817 2024-11-20T15:22:05,447 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting c5299258c235490285382b40e8312864, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1732116120817 2024-11-20T15:22:05,448 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 285b1029a02c488b85871edf0fcf3e25, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1732116122018 2024-11-20T15:22:05,448 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting dc0904a8fba44585ae5d65238b860085, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1732116122018 2024-11-20T15:22:05,458 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:05,460 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#B#compaction#201 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:05,460 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/78bdcc0a5fc449f7b1d23012f2279df5 is 50, key is test_row_0/B:col10/1732116122026/Put/seqid=0 2024-11-20T15:22:05,461 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120b4d3da0699964cb5a88cc56bb694d7f2_6e74b15d2fcbe758642cc4f5cf813621 store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:05,463 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120b4d3da0699964cb5a88cc56bb694d7f2_6e74b15d2fcbe758642cc4f5cf813621, store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:05,463 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b4d3da0699964cb5a88cc56bb694d7f2_6e74b15d2fcbe758642cc4f5cf813621 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:05,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742062_1238 (size=12949) 2024-11-20T15:22:05,478 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/78bdcc0a5fc449f7b1d23012f2279df5 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/78bdcc0a5fc449f7b1d23012f2279df5 2024-11-20T15:22:05,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742063_1239 (size=4469) 2024-11-20T15:22:05,485 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#A#compaction#202 average throughput is 0.90 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:05,485 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/7821dddf8c1546708dfd669b60cf9a2e is 175, key is test_row_0/A:col10/1732116122026/Put/seqid=0 2024-11-20T15:22:05,494 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/B of 6e74b15d2fcbe758642cc4f5cf813621 into 78bdcc0a5fc449f7b1d23012f2279df5(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:05,494 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:05,494 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/B, priority=12, startTime=1732116125443; duration=0sec 2024-11-20T15:22:05,494 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:05,494 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:B 2024-11-20T15:22:05,494 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:05,496 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49366 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:05,496 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/C is initiating minor compaction (all files) 2024-11-20T15:22:05,496 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/C in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:22:05,497 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/5363fefa8c274a19b4d4fd39a3455002, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/d1fa9b5f76a948439adf390a0f5f1ded, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/ebb9e112046b4f6f8a682d7a2ae4ed94, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/e317d80acb6548539815b497a4157f75] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=48.2 K 2024-11-20T15:22:05,498 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 5363fefa8c274a19b4d4fd39a3455002, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1732116119477 2024-11-20T15:22:05,498 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting d1fa9b5f76a948439adf390a0f5f1ded, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732116120641 2024-11-20T15:22:05,499 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting ebb9e112046b4f6f8a682d7a2ae4ed94, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1732116120817 2024-11-20T15:22:05,499 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting e317d80acb6548539815b497a4157f75, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1732116122018 2024-11-20T15:22:05,516 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#C#compaction#203 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:05,517 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/9284379e4070457096947821266211d7 is 50, key is test_row_0/C:col10/1732116122026/Put/seqid=0 2024-11-20T15:22:05,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742064_1240 (size=31903) 2024-11-20T15:22:05,526 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/7821dddf8c1546708dfd669b60cf9a2e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/7821dddf8c1546708dfd669b60cf9a2e 2024-11-20T15:22:05,530 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/A of 6e74b15d2fcbe758642cc4f5cf813621 into 7821dddf8c1546708dfd669b60cf9a2e(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:05,531 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:05,531 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/A, priority=12, startTime=1732116125442; duration=0sec 2024-11-20T15:22:05,531 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:05,531 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:A 2024-11-20T15:22:05,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742065_1241 (size=12949) 2024-11-20T15:22:05,539 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/9284379e4070457096947821266211d7 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/9284379e4070457096947821266211d7 2024-11-20T15:22:05,544 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/C of 6e74b15d2fcbe758642cc4f5cf813621 into 9284379e4070457096947821266211d7(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
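After the flush leaves each store with four HFiles, CompactSplit queues stores A, B and C and ExploringCompactionPolicy selects all four eligible files for a minor compaction; the "total limit is 50.00 MB/second" figures come from the pressure-aware throughput controller. How aggressively this kicks in is governed by the standard store-compaction settings; the sketch below shows them set programmatically, with illustrative values only, not the ones this test actually uses (the "16 blocking" in the selection lines corresponds to hbase.hstore.blockingStoreFiles).

// Hedged sketch: illustrative store-compaction settings behind the selection seen above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // min files before a minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);       // max files merged in one compaction
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // ratio used by ExploringCompactionPolicy
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" in the log lines
    System.out.println("compaction.min=" + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}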
2024-11-20T15:22:05,544 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:05,544 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/C, priority=12, startTime=1732116125443; duration=0sec 2024-11-20T15:22:05,544 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:05,544 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:C 2024-11-20T15:22:05,583 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:05,583 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T15:22:05,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:05,583 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T15:22:05,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:22:05,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:05,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:22:05,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:05,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:22:05,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:05,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cfcddc2fe9a4438d8380851ff977efdc_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116124204/Put/seqid=0 2024-11-20T15:22:05,608 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742066_1242 (size=12454) 2024-11-20T15:22:05,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T15:22:06,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:06,014 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cfcddc2fe9a4438d8380851ff977efdc_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cfcddc2fe9a4438d8380851ff977efdc_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:06,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/86389c2f574c46ee81eccf8d7904489e, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:06,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/86389c2f574c46ee81eccf8d7904489e is 175, key is test_row_0/A:col10/1732116124204/Put/seqid=0 2024-11-20T15:22:06,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742067_1243 (size=31255) 2024-11-20T15:22:06,021 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=308, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/86389c2f574c46ee81eccf8d7904489e 2024-11-20T15:22:06,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/742e47353e0845ad94e4778a8e95fc95 is 50, key is test_row_0/B:col10/1732116124204/Put/seqid=0 2024-11-20T15:22:06,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742068_1244 (size=12301) 2024-11-20T15:22:06,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T15:22:06,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:22:06,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:06,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:06,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 297 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116186360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:06,435 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=308 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/742e47353e0845ad94e4778a8e95fc95 2024-11-20T15:22:06,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/96a309bd543e4abf8745907c8d96dc4b is 50, key is test_row_0/C:col10/1732116124204/Put/seqid=0 2024-11-20T15:22:06,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742069_1245 (size=12301) 2024-11-20T15:22:06,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:06,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 299 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116186462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:06,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:06,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 301 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116186664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:06,851 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=308 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/96a309bd543e4abf8745907c8d96dc4b 2024-11-20T15:22:06,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/86389c2f574c46ee81eccf8d7904489e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/86389c2f574c46ee81eccf8d7904489e 2024-11-20T15:22:06,861 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/86389c2f574c46ee81eccf8d7904489e, entries=150, sequenceid=308, filesize=30.5 K 2024-11-20T15:22:06,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/742e47353e0845ad94e4778a8e95fc95 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/742e47353e0845ad94e4778a8e95fc95 2024-11-20T15:22:06,867 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/742e47353e0845ad94e4778a8e95fc95, entries=150, sequenceid=308, filesize=12.0 K 2024-11-20T15:22:06,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/96a309bd543e4abf8745907c8d96dc4b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/96a309bd543e4abf8745907c8d96dc4b 2024-11-20T15:22:06,872 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/96a309bd543e4abf8745907c8d96dc4b, entries=150, sequenceid=308, filesize=12.0 K 2024-11-20T15:22:06,873 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 6e74b15d2fcbe758642cc4f5cf813621 in 1290ms, sequenceid=308, compaction requested=false 2024-11-20T15:22:06,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:06,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:06,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-20T15:22:06,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-11-20T15:22:06,881 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-20T15:22:06,881 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7550 sec 2024-11-20T15:22:06,882 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 1.7590 sec 2024-11-20T15:22:06,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:06,968 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T15:22:06,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:22:06,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:06,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:22:06,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:06,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:22:06,969 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:06,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207089c2f367e740109ef01588ee362b7f_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116126967/Put/seqid=0 2024-11-20T15:22:06,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742070_1246 (size=14994) 2024-11-20T15:22:06,980 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:06,984 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207089c2f367e740109ef01588ee362b7f_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207089c2f367e740109ef01588ee362b7f_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:06,985 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/396e2e62535c40178e859a9d1c70d55d, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:06,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/396e2e62535c40178e859a9d1c70d55d is 175, key is test_row_0/A:col10/1732116126967/Put/seqid=0 2024-11-20T15:22:06,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742071_1247 (size=39949) 2024-11-20T15:22:07,008 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:07,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 319 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116187007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:07,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:07,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 321 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116187110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:07,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T15:22:07,228 INFO [Thread-779 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-20T15:22:07,230 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:07,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-20T15:22:07,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T15:22:07,232 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:07,232 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:07,233 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:07,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:07,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 323 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116187312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:07,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T15:22:07,384 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:07,385 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T15:22:07,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:07,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:22:07,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:07,385 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:07,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:07,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:07,390 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=326, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/396e2e62535c40178e859a9d1c70d55d 2024-11-20T15:22:07,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/88bd3a1d8eb94cbaba6f433c7d92c73d is 50, key is test_row_0/B:col10/1732116126967/Put/seqid=0 2024-11-20T15:22:07,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742072_1248 (size=12301) 2024-11-20T15:22:07,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T15:22:07,538 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:07,538 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T15:22:07,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:07,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:22:07,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:07,539 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:07,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:07,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:07,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:07,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 325 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116187618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:07,691 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:07,691 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T15:22:07,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:07,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:22:07,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:22:07,692 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:07,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:07,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:07,803 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/88bd3a1d8eb94cbaba6f433c7d92c73d 2024-11-20T15:22:07,811 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/3e16b40a924d42b4a76fb1cddc2c821d is 50, key is test_row_0/C:col10/1732116126967/Put/seqid=0 2024-11-20T15:22:07,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742073_1249 (size=12301) 2024-11-20T15:22:07,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T15:22:07,844 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:07,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T15:22:07,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:07,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
as already flushing 2024-11-20T15:22:07,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:07,845 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:07,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:07,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:07,997 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:07,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T15:22:07,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:07,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:22:07,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:07,998 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:07,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:07,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:08,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:08,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 327 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116188123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:08,150 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:08,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T15:22:08,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:08,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. as already flushing 2024-11-20T15:22:08,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:08,151 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:08,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:08,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:08,216 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/3e16b40a924d42b4a76fb1cddc2c821d 2024-11-20T15:22:08,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/396e2e62535c40178e859a9d1c70d55d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/396e2e62535c40178e859a9d1c70d55d 2024-11-20T15:22:08,224 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/396e2e62535c40178e859a9d1c70d55d, entries=200, sequenceid=326, filesize=39.0 K 2024-11-20T15:22:08,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/88bd3a1d8eb94cbaba6f433c7d92c73d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/88bd3a1d8eb94cbaba6f433c7d92c73d 2024-11-20T15:22:08,229 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/88bd3a1d8eb94cbaba6f433c7d92c73d, entries=150, sequenceid=326, filesize=12.0 K 2024-11-20T15:22:08,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/3e16b40a924d42b4a76fb1cddc2c821d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/3e16b40a924d42b4a76fb1cddc2c821d 2024-11-20T15:22:08,234 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/3e16b40a924d42b4a76fb1cddc2c821d, entries=150, sequenceid=326, filesize=12.0 K 2024-11-20T15:22:08,235 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 6e74b15d2fcbe758642cc4f5cf813621 in 1267ms, sequenceid=326, compaction requested=true 2024-11-20T15:22:08,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:08,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
6e74b15d2fcbe758642cc4f5cf813621:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:08,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:08,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:08,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:08,235 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:08,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:08,235 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:08,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:08,236 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:08,236 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:08,236 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/A is initiating minor compaction (all files) 2024-11-20T15:22:08,236 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/B is initiating minor compaction (all files) 2024-11-20T15:22:08,236 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/B in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:08,236 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/A in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:22:08,237 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/78bdcc0a5fc449f7b1d23012f2279df5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/742e47353e0845ad94e4778a8e95fc95, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/88bd3a1d8eb94cbaba6f433c7d92c73d] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=36.7 K 2024-11-20T15:22:08,237 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/7821dddf8c1546708dfd669b60cf9a2e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/86389c2f574c46ee81eccf8d7904489e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/396e2e62535c40178e859a9d1c70d55d] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=100.7 K 2024-11-20T15:22:08,237 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:08,237 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/7821dddf8c1546708dfd669b60cf9a2e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/86389c2f574c46ee81eccf8d7904489e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/396e2e62535c40178e859a9d1c70d55d] 2024-11-20T15:22:08,237 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 78bdcc0a5fc449f7b1d23012f2279df5, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1732116122018 2024-11-20T15:22:08,237 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7821dddf8c1546708dfd669b60cf9a2e, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1732116122018 2024-11-20T15:22:08,237 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 742e47353e0845ad94e4778a8e95fc95, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1732116124194 2024-11-20T15:22:08,237 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86389c2f574c46ee81eccf8d7904489e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1732116124194 2024-11-20T15:22:08,238 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 88bd3a1d8eb94cbaba6f433c7d92c73d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732116126346 2024-11-20T15:22:08,238 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 396e2e62535c40178e859a9d1c70d55d, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732116126346 2024-11-20T15:22:08,245 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#B#compaction#210 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:08,246 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/245f3636d81a43f1bbe5db4052805c60 is 50, key is test_row_0/B:col10/1732116126967/Put/seqid=0 2024-11-20T15:22:08,249 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:08,260 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112064793f45a6544d5389594d02bd21a972_6e74b15d2fcbe758642cc4f5cf813621 store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:08,262 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112064793f45a6544d5389594d02bd21a972_6e74b15d2fcbe758642cc4f5cf813621, store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:08,262 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112064793f45a6544d5389594d02bd21a972_6e74b15d2fcbe758642cc4f5cf813621 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:08,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742074_1250 (size=13051) 2024-11-20T15:22:08,269 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/245f3636d81a43f1bbe5db4052805c60 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/245f3636d81a43f1bbe5db4052805c60 2024-11-20T15:22:08,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742075_1251 (size=4469) 2024-11-20T15:22:08,271 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#A#compaction#211 average throughput is 1.11 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:08,272 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c31d5163028a4d748edc32070b36c084 is 175, key is test_row_0/A:col10/1732116126967/Put/seqid=0 2024-11-20T15:22:08,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742076_1252 (size=32005) 2024-11-20T15:22:08,277 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/B of 6e74b15d2fcbe758642cc4f5cf813621 into 245f3636d81a43f1bbe5db4052805c60(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:08,277 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:08,277 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/B, priority=13, startTime=1732116128235; duration=0sec 2024-11-20T15:22:08,277 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:08,277 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:B 2024-11-20T15:22:08,277 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:08,279 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:08,279 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/C is initiating minor compaction (all files) 2024-11-20T15:22:08,280 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/C in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:22:08,280 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/9284379e4070457096947821266211d7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/96a309bd543e4abf8745907c8d96dc4b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/3e16b40a924d42b4a76fb1cddc2c821d] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=36.7 K 2024-11-20T15:22:08,280 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 9284379e4070457096947821266211d7, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1732116122018 2024-11-20T15:22:08,280 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 96a309bd543e4abf8745907c8d96dc4b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1732116124194 2024-11-20T15:22:08,281 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e16b40a924d42b4a76fb1cddc2c821d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732116126346 2024-11-20T15:22:08,281 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c31d5163028a4d748edc32070b36c084 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c31d5163028a4d748edc32070b36c084 2024-11-20T15:22:08,287 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/A of 6e74b15d2fcbe758642cc4f5cf813621 into c31d5163028a4d748edc32070b36c084(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:08,287 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:08,287 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/A, priority=13, startTime=1732116128235; duration=0sec 2024-11-20T15:22:08,287 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:08,287 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:A 2024-11-20T15:22:08,290 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#C#compaction#212 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:08,290 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/fe750421359a49f1a512434874a1098a is 50, key is test_row_0/C:col10/1732116126967/Put/seqid=0 2024-11-20T15:22:08,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742077_1253 (size=13051) 2024-11-20T15:22:08,303 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:08,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T15:22:08,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:22:08,304 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T15:22:08,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:22:08,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:08,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:22:08,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:08,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:22:08,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:08,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112073849e91cac54707a9346f64c630e0ba_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116127003/Put/seqid=0 2024-11-20T15:22:08,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742078_1254 (size=12454) 2024-11-20T15:22:08,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T15:22:08,385 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T15:22:08,701 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/fe750421359a49f1a512434874a1098a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/fe750421359a49f1a512434874a1098a 2024-11-20T15:22:08,706 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/C of 6e74b15d2fcbe758642cc4f5cf813621 into fe750421359a49f1a512434874a1098a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:08,706 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:08,706 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/C, priority=13, startTime=1732116128235; duration=0sec 2024-11-20T15:22:08,706 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:08,706 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:C 2024-11-20T15:22:08,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:08,724 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112073849e91cac54707a9346f64c630e0ba_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112073849e91cac54707a9346f64c630e0ba_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:08,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c437d646ed08487b91784d75f6b4f96e, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:08,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c437d646ed08487b91784d75f6b4f96e is 175, key is test_row_0/A:col10/1732116127003/Put/seqid=0 2024-11-20T15:22:08,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742079_1255 (size=31255) 2024-11-20T15:22:09,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:09,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
as already flushing 2024-11-20T15:22:09,131 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=346, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c437d646ed08487b91784d75f6b4f96e 2024-11-20T15:22:09,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/e04fe2942f96437fadd6867880861b55 is 50, key is test_row_0/B:col10/1732116127003/Put/seqid=0 2024-11-20T15:22:09,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742080_1256 (size=12301) 2024-11-20T15:22:09,145 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/e04fe2942f96437fadd6867880861b55 2024-11-20T15:22:09,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/6ca27e3d2a9d491db38b91768cdf6daf is 50, key is test_row_0/C:col10/1732116127003/Put/seqid=0 2024-11-20T15:22:09,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742081_1257 (size=12301) 2024-11-20T15:22:09,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:09,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 344 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116189164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:09,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:09,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 346 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116189267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:09,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T15:22:09,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:09,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 348 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58970 deadline: 1732116189470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:09,559 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/6ca27e3d2a9d491db38b91768cdf6daf 2024-11-20T15:22:09,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/c437d646ed08487b91784d75f6b4f96e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c437d646ed08487b91784d75f6b4f96e 2024-11-20T15:22:09,569 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c437d646ed08487b91784d75f6b4f96e, entries=150, sequenceid=346, filesize=30.5 K 2024-11-20T15:22:09,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/e04fe2942f96437fadd6867880861b55 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/e04fe2942f96437fadd6867880861b55 2024-11-20T15:22:09,574 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/e04fe2942f96437fadd6867880861b55, entries=150, sequenceid=346, filesize=12.0 K 2024-11-20T15:22:09,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/6ca27e3d2a9d491db38b91768cdf6daf as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/6ca27e3d2a9d491db38b91768cdf6daf 2024-11-20T15:22:09,579 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/6ca27e3d2a9d491db38b91768cdf6daf, entries=150, sequenceid=346, filesize=12.0 K 2024-11-20T15:22:09,580 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 6e74b15d2fcbe758642cc4f5cf813621 in 1276ms, sequenceid=346, compaction requested=false 2024-11-20T15:22:09,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:09,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:22:09,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-20T15:22:09,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-20T15:22:09,587 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-20T15:22:09,587 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3520 sec 2024-11-20T15:22:09,588 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 2.3570 sec 2024-11-20T15:22:09,746 DEBUG [Thread-782 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14c16cd4 to 127.0.0.1:62338 2024-11-20T15:22:09,746 DEBUG [Thread-782 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:09,747 DEBUG [Thread-780 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78cafade to 127.0.0.1:62338 2024-11-20T15:22:09,747 DEBUG [Thread-780 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:09,747 DEBUG [Thread-786 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26b120d9 to 127.0.0.1:62338 2024-11-20T15:22:09,747 DEBUG [Thread-786 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:09,748 DEBUG [Thread-784 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0341384e to 127.0.0.1:62338 2024-11-20T15:22:09,748 DEBUG [Thread-784 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:09,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:09,774 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T15:22:09,774 DEBUG [Thread-777 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2885d2d9 to 127.0.0.1:62338 2024-11-20T15:22:09,774 DEBUG [Thread-777 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:09,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:22:09,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:09,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:22:09,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:09,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:22:09,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:09,781 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207376f4f4b3da4a8c99daccf252e6f9b9_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116129158/Put/seqid=0 2024-11-20T15:22:09,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742082_1258 (size=12454) 2024-11-20T15:22:10,185 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:10,189 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207376f4f4b3da4a8c99daccf252e6f9b9_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207376f4f4b3da4a8c99daccf252e6f9b9_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:10,190 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/136a319ea48348a38402e90eff307455, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:10,190 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/136a319ea48348a38402e90eff307455 is 175, key is test_row_0/A:col10/1732116129158/Put/seqid=0 2024-11-20T15:22:10,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742083_1259 (size=31255) 2024-11-20T15:22:10,194 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=366, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/136a319ea48348a38402e90eff307455 2024-11-20T15:22:10,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/7aab9492769848c08d7ed853b0e534dd is 50, key is test_row_0/B:col10/1732116129158/Put/seqid=0 2024-11-20T15:22:10,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742084_1260 (size=12301) 2024-11-20T15:22:10,605 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/7aab9492769848c08d7ed853b0e534dd 2024-11-20T15:22:10,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/980a8135b51c4cdc950774cb012639bc is 50, key is test_row_0/C:col10/1732116129158/Put/seqid=0 2024-11-20T15:22:10,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742085_1261 (size=12301) 2024-11-20T15:22:10,981 DEBUG [Thread-771 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x491ea2ee to 127.0.0.1:62338 2024-11-20T15:22:10,981 DEBUG [Thread-771 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:10,992 DEBUG [Thread-773 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x190853fc to 127.0.0.1:62338 2024-11-20T15:22:10,992 DEBUG [Thread-773 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:10,994 DEBUG [Thread-775 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x46114993 to 127.0.0.1:62338 2024-11-20T15:22:10,994 DEBUG [Thread-775 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:11,011 DEBUG [Thread-769 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2931c73e to 127.0.0.1:62338 2024-11-20T15:22:11,011 DEBUG [Thread-769 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:11,015 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/980a8135b51c4cdc950774cb012639bc 2024-11-20T15:22:11,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/136a319ea48348a38402e90eff307455 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/136a319ea48348a38402e90eff307455 2024-11-20T15:22:11,023 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/136a319ea48348a38402e90eff307455, entries=150, sequenceid=366, filesize=30.5 K 2024-11-20T15:22:11,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/7aab9492769848c08d7ed853b0e534dd as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/7aab9492769848c08d7ed853b0e534dd 2024-11-20T15:22:11,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/7aab9492769848c08d7ed853b0e534dd, entries=150, sequenceid=366, filesize=12.0 K 2024-11-20T15:22:11,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/980a8135b51c4cdc950774cb012639bc as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/980a8135b51c4cdc950774cb012639bc 2024-11-20T15:22:11,031 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/980a8135b51c4cdc950774cb012639bc, entries=150, sequenceid=366, filesize=12.0 K 2024-11-20T15:22:11,032 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=26.84 KB/27480 for 6e74b15d2fcbe758642cc4f5cf813621 in 1258ms, sequenceid=366, compaction requested=true 2024-11-20T15:22:11,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:11,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:11,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:11,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:11,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:11,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e74b15d2fcbe758642cc4f5cf813621:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:11,032 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:11,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:11,032 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:11,033 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:11,033 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94515 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:11,033 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/B is initiating minor compaction (all files) 2024-11-20T15:22:11,033 DEBUG 
[RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/A is initiating minor compaction (all files) 2024-11-20T15:22:11,033 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/B in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:11,033 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/A in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:11,033 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/245f3636d81a43f1bbe5db4052805c60, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/e04fe2942f96437fadd6867880861b55, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/7aab9492769848c08d7ed853b0e534dd] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=36.8 K 2024-11-20T15:22:11,033 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c31d5163028a4d748edc32070b36c084, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c437d646ed08487b91784d75f6b4f96e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/136a319ea48348a38402e90eff307455] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=92.3 K 2024-11-20T15:22:11,034 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:11,034 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c31d5163028a4d748edc32070b36c084, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c437d646ed08487b91784d75f6b4f96e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/136a319ea48348a38402e90eff307455] 2024-11-20T15:22:11,034 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 245f3636d81a43f1bbe5db4052805c60, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732116126346 2024-11-20T15:22:11,034 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting c31d5163028a4d748edc32070b36c084, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732116126346 2024-11-20T15:22:11,034 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting e04fe2942f96437fadd6867880861b55, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1732116126993 2024-11-20T15:22:11,034 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting c437d646ed08487b91784d75f6b4f96e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1732116126993 2024-11-20T15:22:11,035 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 136a319ea48348a38402e90eff307455, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732116129158 2024-11-20T15:22:11,035 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 7aab9492769848c08d7ed853b0e534dd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732116129158 2024-11-20T15:22:11,043 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#B#compaction#219 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:11,044 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/6e8db44ecb1c490e85a9c02af3c34a81 is 50, key is test_row_0/B:col10/1732116129158/Put/seqid=0 2024-11-20T15:22:11,045 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:11,047 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112007020ea3de194edc8dca340930d02845_6e74b15d2fcbe758642cc4f5cf813621 store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:11,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742086_1262 (size=13153) 2024-11-20T15:22:11,070 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112007020ea3de194edc8dca340930d02845_6e74b15d2fcbe758642cc4f5cf813621, store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:11,070 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112007020ea3de194edc8dca340930d02845_6e74b15d2fcbe758642cc4f5cf813621 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:11,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742087_1263 (size=4469) 2024-11-20T15:22:11,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T15:22:11,338 INFO [Thread-779 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-20T15:22:11,338 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T15:22:11,338 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 16 2024-11-20T15:22:11,338 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 17 2024-11-20T15:22:11,338 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 15 2024-11-20T15:22:11,338 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 16 2024-11-20T15:22:11,338 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 227 2024-11-20T15:22:11,338 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T15:22:11,338 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7680 2024-11-20T15:22:11,338 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7394 2024-11-20T15:22:11,338 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T15:22:11,338 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3187 2024-11-20T15:22:11,338 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9561 rows 2024-11-20T15:22:11,338 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3168 2024-11-20T15:22:11,338 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9503 rows 2024-11-20T15:22:11,338 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T15:22:11,338 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d29de25 to 127.0.0.1:62338 2024-11-20T15:22:11,338 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:11,341 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T15:22:11,341 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T15:22:11,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:11,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T15:22:11,345 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116131345"}]},"ts":"1732116131345"} 2024-11-20T15:22:11,346 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T15:22:11,349 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T15:22:11,349 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T15:22:11,350 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e74b15d2fcbe758642cc4f5cf813621, UNASSIGN}] 2024-11-20T15:22:11,351 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=6e74b15d2fcbe758642cc4f5cf813621, UNASSIGN 2024-11-20T15:22:11,352 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=75 updating hbase:meta row=6e74b15d2fcbe758642cc4f5cf813621, regionState=CLOSING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:22:11,352 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T15:22:11,352 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; CloseRegionProcedure 6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:22:11,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T15:22:11,454 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/6e8db44ecb1c490e85a9c02af3c34a81 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/6e8db44ecb1c490e85a9c02af3c34a81 2024-11-20T15:22:11,458 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/B of 6e74b15d2fcbe758642cc4f5cf813621 into 6e8db44ecb1c490e85a9c02af3c34a81(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:11,459 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:11,459 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/B, priority=13, startTime=1732116131032; duration=0sec 2024-11-20T15:22:11,459 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:11,459 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:B 2024-11-20T15:22:11,459 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:11,460 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:11,460 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e74b15d2fcbe758642cc4f5cf813621/C is initiating minor compaction (all files) 2024-11-20T15:22:11,460 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e74b15d2fcbe758642cc4f5cf813621/C in TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:22:11,460 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/fe750421359a49f1a512434874a1098a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/6ca27e3d2a9d491db38b91768cdf6daf, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/980a8135b51c4cdc950774cb012639bc] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp, totalSize=36.8 K 2024-11-20T15:22:11,460 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting fe750421359a49f1a512434874a1098a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732116126346 2024-11-20T15:22:11,461 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ca27e3d2a9d491db38b91768cdf6daf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1732116126993 2024-11-20T15:22:11,461 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 980a8135b51c4cdc950774cb012639bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732116129158 2024-11-20T15:22:11,468 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#C#compaction#221 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:11,469 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/a155843e917d443dbf45456c9a942938 is 50, key is test_row_0/C:col10/1732116129158/Put/seqid=0 2024-11-20T15:22:11,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742088_1264 (size=13153) 2024-11-20T15:22:11,475 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e74b15d2fcbe758642cc4f5cf813621#A#compaction#220 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:11,476 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/e39a56a4e3254cf08351fc043b5d5527 is 175, key is test_row_0/A:col10/1732116129158/Put/seqid=0 2024-11-20T15:22:11,477 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/a155843e917d443dbf45456c9a942938 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/a155843e917d443dbf45456c9a942938 2024-11-20T15:22:11,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742089_1265 (size=32107) 2024-11-20T15:22:11,481 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/C of 6e74b15d2fcbe758642cc4f5cf813621 into a155843e917d443dbf45456c9a942938(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:11,481 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:11,481 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/C, priority=13, startTime=1732116131032; duration=0sec 2024-11-20T15:22:11,481 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:11,481 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:C 2024-11-20T15:22:11,504 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:11,504 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] handler.UnassignRegionHandler(124): Close 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:11,504 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T15:22:11,505 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(1681): Closing 6e74b15d2fcbe758642cc4f5cf813621, disabling compactions & flushes 2024-11-20T15:22:11,505 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(1942): waiting for 1 compactions to complete for region TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:22:11,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T15:22:11,884 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/e39a56a4e3254cf08351fc043b5d5527 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/e39a56a4e3254cf08351fc043b5d5527 2024-11-20T15:22:11,888 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e74b15d2fcbe758642cc4f5cf813621/A of 6e74b15d2fcbe758642cc4f5cf813621 into e39a56a4e3254cf08351fc043b5d5527(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:11,888 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:11,888 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621., storeName=6e74b15d2fcbe758642cc4f5cf813621/A, priority=13, startTime=1732116131032; duration=0sec 2024-11-20T15:22:11,889 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:11,889 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e74b15d2fcbe758642cc4f5cf813621:A 2024-11-20T15:22:11,889 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:11,889 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:11,889 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. after waiting 0 ms 2024-11-20T15:22:11,889 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 
2024-11-20T15:22:11,889 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(2837): Flushing 6e74b15d2fcbe758642cc4f5cf813621 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T15:22:11,889 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=A 2024-11-20T15:22:11,889 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:11,889 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=B 2024-11-20T15:22:11,889 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:11,889 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e74b15d2fcbe758642cc4f5cf813621, store=C 2024-11-20T15:22:11,889 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:11,896 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a8498e1ba8614b899e3689b31b054a9f_6e74b15d2fcbe758642cc4f5cf813621 is 50, key is test_row_0/A:col10/1732116130991/Put/seqid=0 2024-11-20T15:22:11,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742090_1266 (size=9914) 2024-11-20T15:22:11,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T15:22:12,300 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:12,304 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a8498e1ba8614b899e3689b31b054a9f_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a8498e1ba8614b899e3689b31b054a9f_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:12,305 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/77180fd932124f4c923f5cb8137983d9, store: [table=TestAcidGuarantees family=A region=6e74b15d2fcbe758642cc4f5cf813621] 2024-11-20T15:22:12,306 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/77180fd932124f4c923f5cb8137983d9 is 175, key is test_row_0/A:col10/1732116130991/Put/seqid=0 2024-11-20T15:22:12,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742091_1267 (size=22561) 2024-11-20T15:22:12,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T15:22:12,710 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=376, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/77180fd932124f4c923f5cb8137983d9 2024-11-20T15:22:12,718 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/af20d3a7281e45ba984e57f27d135472 is 50, key is test_row_0/B:col10/1732116130991/Put/seqid=0 2024-11-20T15:22:12,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742092_1268 (size=9857) 2024-11-20T15:22:13,122 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/af20d3a7281e45ba984e57f27d135472 2024-11-20T15:22:13,129 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/0edfb82ac9da43e3996ce668dce0ccf6 is 50, key is test_row_0/C:col10/1732116130991/Put/seqid=0 2024-11-20T15:22:13,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742093_1269 (size=9857) 2024-11-20T15:22:13,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T15:22:13,533 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=376 (bloomFilter=true), 
to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/0edfb82ac9da43e3996ce668dce0ccf6 2024-11-20T15:22:13,537 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/A/77180fd932124f4c923f5cb8137983d9 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/77180fd932124f4c923f5cb8137983d9 2024-11-20T15:22:13,541 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/77180fd932124f4c923f5cb8137983d9, entries=100, sequenceid=376, filesize=22.0 K 2024-11-20T15:22:13,541 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/B/af20d3a7281e45ba984e57f27d135472 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/af20d3a7281e45ba984e57f27d135472 2024-11-20T15:22:13,545 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/af20d3a7281e45ba984e57f27d135472, entries=100, sequenceid=376, filesize=9.6 K 2024-11-20T15:22:13,545 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/.tmp/C/0edfb82ac9da43e3996ce668dce0ccf6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/0edfb82ac9da43e3996ce668dce0ccf6 2024-11-20T15:22:13,548 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/0edfb82ac9da43e3996ce668dce0ccf6, entries=100, sequenceid=376, filesize=9.6 K 2024-11-20T15:22:13,549 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 6e74b15d2fcbe758642cc4f5cf813621 in 1660ms, sequenceid=376, compaction requested=false 2024-11-20T15:22:13,550 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/edbb8930bc1c4716a1c3f20c1522fb67, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c07072bab03e4092b4e02a83205c14e5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c1a8c18689b4489695165f677e4b9d93, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/6297cf0fe6fb4e95a12e7e5f0da8a03d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c2e88296ae714b06b8e8e949681dfd1d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/82435ee830cc4dfcb480b1b9f8bbdbf9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/16fea317fd7d42b3a8540ec28090d00c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/d9aa355e30814b80acf0df0081e69f95, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/a3d28bfc4a924d929635b656781f83f9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/888d0d3bded34ec7973c1746f0572a2d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/7d4deeb8ba6a49489dcd3453280dc718, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/86c0057a3df5432b9ba4b79fea69eed1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/0bc1d78a18aa4b77a588f96307b7feb7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/2026d6e5849a44dc8e0ba562a4a19af6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/6721c44fd03a45d29661d2bdf59876ce, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/56dec9975c9740ae83b9eb80a559c810, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/18d3c15711914234a42c6cc1b0421b16, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/5b5855a0e5cb40d19fdff5e737b43ebd, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/285b1029a02c488b85871edf0fcf3e25, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/7821dddf8c1546708dfd669b60cf9a2e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/86389c2f574c46ee81eccf8d7904489e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/396e2e62535c40178e859a9d1c70d55d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c31d5163028a4d748edc32070b36c084, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c437d646ed08487b91784d75f6b4f96e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/136a319ea48348a38402e90eff307455] to archive 2024-11-20T15:22:13,551 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T15:22:13,552 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/edbb8930bc1c4716a1c3f20c1522fb67 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/edbb8930bc1c4716a1c3f20c1522fb67 2024-11-20T15:22:13,553 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c07072bab03e4092b4e02a83205c14e5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c07072bab03e4092b4e02a83205c14e5 2024-11-20T15:22:13,554 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c1a8c18689b4489695165f677e4b9d93 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c1a8c18689b4489695165f677e4b9d93 2024-11-20T15:22:13,555 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/6297cf0fe6fb4e95a12e7e5f0da8a03d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/6297cf0fe6fb4e95a12e7e5f0da8a03d 2024-11-20T15:22:13,556 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c2e88296ae714b06b8e8e949681dfd1d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c2e88296ae714b06b8e8e949681dfd1d 2024-11-20T15:22:13,557 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/82435ee830cc4dfcb480b1b9f8bbdbf9 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/82435ee830cc4dfcb480b1b9f8bbdbf9 2024-11-20T15:22:13,558 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/16fea317fd7d42b3a8540ec28090d00c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/16fea317fd7d42b3a8540ec28090d00c 2024-11-20T15:22:13,559 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/d9aa355e30814b80acf0df0081e69f95 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/d9aa355e30814b80acf0df0081e69f95 2024-11-20T15:22:13,559 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/a3d28bfc4a924d929635b656781f83f9 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/a3d28bfc4a924d929635b656781f83f9 2024-11-20T15:22:13,560 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/888d0d3bded34ec7973c1746f0572a2d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/888d0d3bded34ec7973c1746f0572a2d 2024-11-20T15:22:13,561 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/7d4deeb8ba6a49489dcd3453280dc718 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/7d4deeb8ba6a49489dcd3453280dc718 2024-11-20T15:22:13,562 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/86c0057a3df5432b9ba4b79fea69eed1 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/86c0057a3df5432b9ba4b79fea69eed1 2024-11-20T15:22:13,563 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/0bc1d78a18aa4b77a588f96307b7feb7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/0bc1d78a18aa4b77a588f96307b7feb7 2024-11-20T15:22:13,564 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/2026d6e5849a44dc8e0ba562a4a19af6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/2026d6e5849a44dc8e0ba562a4a19af6 2024-11-20T15:22:13,565 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/6721c44fd03a45d29661d2bdf59876ce to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/6721c44fd03a45d29661d2bdf59876ce 2024-11-20T15:22:13,566 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/56dec9975c9740ae83b9eb80a559c810 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/56dec9975c9740ae83b9eb80a559c810 2024-11-20T15:22:13,567 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/18d3c15711914234a42c6cc1b0421b16 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/18d3c15711914234a42c6cc1b0421b16 2024-11-20T15:22:13,568 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/5b5855a0e5cb40d19fdff5e737b43ebd to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/5b5855a0e5cb40d19fdff5e737b43ebd 2024-11-20T15:22:13,569 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/285b1029a02c488b85871edf0fcf3e25 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/285b1029a02c488b85871edf0fcf3e25 2024-11-20T15:22:13,570 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/7821dddf8c1546708dfd669b60cf9a2e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/7821dddf8c1546708dfd669b60cf9a2e 2024-11-20T15:22:13,571 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/86389c2f574c46ee81eccf8d7904489e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/86389c2f574c46ee81eccf8d7904489e 2024-11-20T15:22:13,572 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/396e2e62535c40178e859a9d1c70d55d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/396e2e62535c40178e859a9d1c70d55d 2024-11-20T15:22:13,573 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c31d5163028a4d748edc32070b36c084 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c31d5163028a4d748edc32070b36c084 2024-11-20T15:22:13,573 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c437d646ed08487b91784d75f6b4f96e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/c437d646ed08487b91784d75f6b4f96e 2024-11-20T15:22:13,574 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/136a319ea48348a38402e90eff307455 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/136a319ea48348a38402e90eff307455 2024-11-20T15:22:13,576 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/12bccbf2bfdc43299dc6d4e7b20ae1f3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/b4289aaa69a84340837d0b9182fec570, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/a07bc14cba89470ab8425c4db3c4215d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/a511fe7965914f67a576245380590f18, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/1c69185bf997447cb037095a9eb1ff30, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/6551b5b064ec4bd3946b484d642a9c47, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/f8762f1fa06643dfaf1ebf7e2e80fad0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/dfc21d08c27245a1bd06e6e9fea34479, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/69ad4e6a1bae458b85889f336f40d58b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/c70df523c2e84d29940a00d2d5514cb0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/7e493d94736b48369c18a3ff8e5e82f7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/6ca00cebd45746e0af2e6e267ac9ec00, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/d1d76b7118664216b63c3d9577a335a3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/b64b2e48bde04a5a8f46c78f0eae5d11, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/51cd20524bc24ecaadce948884513108, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/33360f8b9b6f4965963105120e200617, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/e51546b10a814d31919e6671623a8d49, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/c5299258c235490285382b40e8312864, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/78bdcc0a5fc449f7b1d23012f2279df5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/dc0904a8fba44585ae5d65238b860085, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/742e47353e0845ad94e4778a8e95fc95, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/245f3636d81a43f1bbe5db4052805c60, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/88bd3a1d8eb94cbaba6f433c7d92c73d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/e04fe2942f96437fadd6867880861b55, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/7aab9492769848c08d7ed853b0e534dd] to archive 2024-11-20T15:22:13,576 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T15:22:13,578 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/12bccbf2bfdc43299dc6d4e7b20ae1f3 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/12bccbf2bfdc43299dc6d4e7b20ae1f3 2024-11-20T15:22:13,579 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/b4289aaa69a84340837d0b9182fec570 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/b4289aaa69a84340837d0b9182fec570 2024-11-20T15:22:13,580 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/a07bc14cba89470ab8425c4db3c4215d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/a07bc14cba89470ab8425c4db3c4215d 2024-11-20T15:22:13,581 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/a511fe7965914f67a576245380590f18 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/a511fe7965914f67a576245380590f18 2024-11-20T15:22:13,582 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/1c69185bf997447cb037095a9eb1ff30 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/1c69185bf997447cb037095a9eb1ff30 2024-11-20T15:22:13,583 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/6551b5b064ec4bd3946b484d642a9c47 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/6551b5b064ec4bd3946b484d642a9c47 2024-11-20T15:22:13,584 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/f8762f1fa06643dfaf1ebf7e2e80fad0 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/f8762f1fa06643dfaf1ebf7e2e80fad0 2024-11-20T15:22:13,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/dfc21d08c27245a1bd06e6e9fea34479 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/dfc21d08c27245a1bd06e6e9fea34479 2024-11-20T15:22:13,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/69ad4e6a1bae458b85889f336f40d58b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/69ad4e6a1bae458b85889f336f40d58b 2024-11-20T15:22:13,586 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/c70df523c2e84d29940a00d2d5514cb0 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/c70df523c2e84d29940a00d2d5514cb0 2024-11-20T15:22:13,587 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/7e493d94736b48369c18a3ff8e5e82f7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/7e493d94736b48369c18a3ff8e5e82f7 2024-11-20T15:22:13,588 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/6ca00cebd45746e0af2e6e267ac9ec00 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/6ca00cebd45746e0af2e6e267ac9ec00 2024-11-20T15:22:13,589 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/d1d76b7118664216b63c3d9577a335a3 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/d1d76b7118664216b63c3d9577a335a3 2024-11-20T15:22:13,590 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/b64b2e48bde04a5a8f46c78f0eae5d11 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/b64b2e48bde04a5a8f46c78f0eae5d11 2024-11-20T15:22:13,591 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/51cd20524bc24ecaadce948884513108 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/51cd20524bc24ecaadce948884513108 2024-11-20T15:22:13,591 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/33360f8b9b6f4965963105120e200617 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/33360f8b9b6f4965963105120e200617 2024-11-20T15:22:13,592 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/e51546b10a814d31919e6671623a8d49 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/e51546b10a814d31919e6671623a8d49 2024-11-20T15:22:13,593 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/c5299258c235490285382b40e8312864 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/c5299258c235490285382b40e8312864 2024-11-20T15:22:13,594 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/78bdcc0a5fc449f7b1d23012f2279df5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/78bdcc0a5fc449f7b1d23012f2279df5 2024-11-20T15:22:13,595 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/dc0904a8fba44585ae5d65238b860085 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/dc0904a8fba44585ae5d65238b860085 2024-11-20T15:22:13,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/742e47353e0845ad94e4778a8e95fc95 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/742e47353e0845ad94e4778a8e95fc95 2024-11-20T15:22:13,597 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/245f3636d81a43f1bbe5db4052805c60 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/245f3636d81a43f1bbe5db4052805c60 2024-11-20T15:22:13,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/88bd3a1d8eb94cbaba6f433c7d92c73d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/88bd3a1d8eb94cbaba6f433c7d92c73d 2024-11-20T15:22:13,600 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/e04fe2942f96437fadd6867880861b55 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/e04fe2942f96437fadd6867880861b55 2024-11-20T15:22:13,601 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/7aab9492769848c08d7ed853b0e534dd to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/7aab9492769848c08d7ed853b0e534dd 2024-11-20T15:22:13,602 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/8e4bb6a87f874ff3b2bd626aa822d428, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/cedf261d0780417ab153821adf31b048, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/eb5ccfb7be9e495dae67c23143c9ef92, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/c1b68c31d3ff4883a50c7b411adebd1a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/100b1fb8ffa6430c812a220e04c90fba, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/6c16f73ba2ba40c08ca03628bca6b784, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/db7c5b806f6c42d4b69c33d48847a56b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/fb54bea9823543d996341075bcb0a012, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/167c1bbee3d2431998187e1a3079d154, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/f732f2fa31654bd9859ef5828824ea78, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/6ccc06ffce16481e958d94022e74bf76, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/a1ed096e1b1045feb5e3f7510087a115, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/1b4955a9922045478b89ddfd53c5871e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/d4c3b85b721d41f29cff6bc1416b948b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/5363fefa8c274a19b4d4fd39a3455002, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/f605331e0e874811aae067ce1c92ce32, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/d1fa9b5f76a948439adf390a0f5f1ded, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/ebb9e112046b4f6f8a682d7a2ae4ed94, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/9284379e4070457096947821266211d7, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/e317d80acb6548539815b497a4157f75, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/96a309bd543e4abf8745907c8d96dc4b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/fe750421359a49f1a512434874a1098a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/3e16b40a924d42b4a76fb1cddc2c821d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/6ca27e3d2a9d491db38b91768cdf6daf, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/980a8135b51c4cdc950774cb012639bc] to archive 2024-11-20T15:22:13,602 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T15:22:13,604 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/8e4bb6a87f874ff3b2bd626aa822d428 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/8e4bb6a87f874ff3b2bd626aa822d428 2024-11-20T15:22:13,605 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/cedf261d0780417ab153821adf31b048 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/cedf261d0780417ab153821adf31b048 2024-11-20T15:22:13,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/eb5ccfb7be9e495dae67c23143c9ef92 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/eb5ccfb7be9e495dae67c23143c9ef92 2024-11-20T15:22:13,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/c1b68c31d3ff4883a50c7b411adebd1a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/c1b68c31d3ff4883a50c7b411adebd1a 2024-11-20T15:22:13,607 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/100b1fb8ffa6430c812a220e04c90fba to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/100b1fb8ffa6430c812a220e04c90fba 2024-11-20T15:22:13,608 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/6c16f73ba2ba40c08ca03628bca6b784 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/6c16f73ba2ba40c08ca03628bca6b784 2024-11-20T15:22:13,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/db7c5b806f6c42d4b69c33d48847a56b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/db7c5b806f6c42d4b69c33d48847a56b 2024-11-20T15:22:13,610 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/fb54bea9823543d996341075bcb0a012 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/fb54bea9823543d996341075bcb0a012 2024-11-20T15:22:13,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/167c1bbee3d2431998187e1a3079d154 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/167c1bbee3d2431998187e1a3079d154 2024-11-20T15:22:13,612 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/f732f2fa31654bd9859ef5828824ea78 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/f732f2fa31654bd9859ef5828824ea78 2024-11-20T15:22:13,613 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/6ccc06ffce16481e958d94022e74bf76 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/6ccc06ffce16481e958d94022e74bf76 2024-11-20T15:22:13,614 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/a1ed096e1b1045feb5e3f7510087a115 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/a1ed096e1b1045feb5e3f7510087a115 2024-11-20T15:22:13,615 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/1b4955a9922045478b89ddfd53c5871e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/1b4955a9922045478b89ddfd53c5871e 2024-11-20T15:22:13,616 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/d4c3b85b721d41f29cff6bc1416b948b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/d4c3b85b721d41f29cff6bc1416b948b 2024-11-20T15:22:13,617 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/5363fefa8c274a19b4d4fd39a3455002 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/5363fefa8c274a19b4d4fd39a3455002 2024-11-20T15:22:13,618 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/f605331e0e874811aae067ce1c92ce32 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/f605331e0e874811aae067ce1c92ce32 2024-11-20T15:22:13,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/d1fa9b5f76a948439adf390a0f5f1ded to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/d1fa9b5f76a948439adf390a0f5f1ded 2024-11-20T15:22:13,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/ebb9e112046b4f6f8a682d7a2ae4ed94 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/ebb9e112046b4f6f8a682d7a2ae4ed94 2024-11-20T15:22:13,620 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/9284379e4070457096947821266211d7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/9284379e4070457096947821266211d7 2024-11-20T15:22:13,621 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/e317d80acb6548539815b497a4157f75 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/e317d80acb6548539815b497a4157f75 2024-11-20T15:22:13,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/96a309bd543e4abf8745907c8d96dc4b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/96a309bd543e4abf8745907c8d96dc4b 2024-11-20T15:22:13,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/fe750421359a49f1a512434874a1098a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/fe750421359a49f1a512434874a1098a 2024-11-20T15:22:13,624 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/3e16b40a924d42b4a76fb1cddc2c821d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/3e16b40a924d42b4a76fb1cddc2c821d 2024-11-20T15:22:13,625 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/6ca27e3d2a9d491db38b91768cdf6daf to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/6ca27e3d2a9d491db38b91768cdf6daf 2024-11-20T15:22:13,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/980a8135b51c4cdc950774cb012639bc to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/980a8135b51c4cdc950774cb012639bc 2024-11-20T15:22:13,630 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/recovered.edits/379.seqid, newMaxSeqId=379, maxSeqId=4 2024-11-20T15:22:13,631 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621. 2024-11-20T15:22:13,631 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(1635): Region close journal for 6e74b15d2fcbe758642cc4f5cf813621: 2024-11-20T15:22:13,633 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] handler.UnassignRegionHandler(170): Closed 6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:13,633 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=75 updating hbase:meta row=6e74b15d2fcbe758642cc4f5cf813621, regionState=CLOSED 2024-11-20T15:22:13,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-20T15:22:13,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; CloseRegionProcedure 6e74b15d2fcbe758642cc4f5cf813621, server=0b62285ead89,33387,1732116069954 in 2.2820 sec 2024-11-20T15:22:13,636 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=75, resume processing ppid=74 2024-11-20T15:22:13,636 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, ppid=74, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e74b15d2fcbe758642cc4f5cf813621, UNASSIGN in 2.2850 sec 2024-11-20T15:22:13,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-20T15:22:13,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.2880 sec 2024-11-20T15:22:13,638 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116133638"}]},"ts":"1732116133638"} 2024-11-20T15:22:13,639 INFO 
[PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T15:22:13,642 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T15:22:13,643 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.3010 sec 2024-11-20T15:22:14,080 DEBUG [master/0b62285ead89:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region dc4fbec7dbed2ad8a83ee5514bc45c4e changed from -1.0 to 0.0, refreshing cache 2024-11-20T15:22:15,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T15:22:15,449 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-20T15:22:15,450 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T15:22:15,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:15,452 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=77, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:15,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T15:22:15,453 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=77, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:15,455 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,457 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/recovered.edits] 2024-11-20T15:22:15,461 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/77180fd932124f4c923f5cb8137983d9 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/77180fd932124f4c923f5cb8137983d9 2024-11-20T15:22:15,463 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/e39a56a4e3254cf08351fc043b5d5527 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/A/e39a56a4e3254cf08351fc043b5d5527 2024-11-20T15:22:15,466 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/6e8db44ecb1c490e85a9c02af3c34a81 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/6e8db44ecb1c490e85a9c02af3c34a81 2024-11-20T15:22:15,467 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/af20d3a7281e45ba984e57f27d135472 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/B/af20d3a7281e45ba984e57f27d135472 2024-11-20T15:22:15,470 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/0edfb82ac9da43e3996ce668dce0ccf6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/0edfb82ac9da43e3996ce668dce0ccf6 2024-11-20T15:22:15,472 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/a155843e917d443dbf45456c9a942938 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/C/a155843e917d443dbf45456c9a942938 2024-11-20T15:22:15,475 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/recovered.edits/379.seqid to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621/recovered.edits/379.seqid 2024-11-20T15:22:15,476 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,476 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T15:22:15,477 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T15:22:15,478 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T15:22:15,482 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112016380897f0334fef97436a992ca3d7ce_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112016380897f0334fef97436a992ca3d7ce_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,483 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112016a74d769ac94b2dba8d0d3cd4ad6778_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112016a74d769ac94b2dba8d0d3cd4ad6778_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,485 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202cbc857d22184708b9794c8baa6b68ab_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202cbc857d22184708b9794c8baa6b68ab_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,486 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202cdcce4c7ac949afa6df8a9e72823d12_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202cdcce4c7ac949afa6df8a9e72823d12_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,487 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202f9564dd17a341ab9d385c13f77707b6_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202f9564dd17a341ab9d385c13f77707b6_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,489 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203e070a91001b4bf08df4468a1feef0b9_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203e070a91001b4bf08df4468a1feef0b9_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,490 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204a62c1b37e0a479197bbb21060fb92b9_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204a62c1b37e0a479197bbb21060fb92b9_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,491 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206ef0d651eac64da7b9ba749381b2218d_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206ef0d651eac64da7b9ba749381b2218d_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,493 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207089c2f367e740109ef01588ee362b7f_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207089c2f367e740109ef01588ee362b7f_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,494 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207376f4f4b3da4a8c99daccf252e6f9b9_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207376f4f4b3da4a8c99daccf252e6f9b9_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,495 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112073849e91cac54707a9346f64c630e0ba_6e74b15d2fcbe758642cc4f5cf813621 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112073849e91cac54707a9346f64c630e0ba_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,497 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208c327a2ab3bc41858e22d6e7c92a6c69_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208c327a2ab3bc41858e22d6e7c92a6c69_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,498 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120901386c19e534ef6a973691dd0b4e8a6_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120901386c19e534ef6a973691dd0b4e8a6_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,499 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209b4e69037e4943dba865f179140dc2d6_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209b4e69037e4943dba865f179140dc2d6_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,501 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a8498e1ba8614b899e3689b31b054a9f_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a8498e1ba8614b899e3689b31b054a9f_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,502 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b4062c1068094508a9ea5bb8d4442c11_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b4062c1068094508a9ea5bb8d4442c11_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,503 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cfcddc2fe9a4438d8380851ff977efdc_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cfcddc2fe9a4438d8380851ff977efdc_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,504 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e84940f5bf6c44d28dfc883b18217be9_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e84940f5bf6c44d28dfc883b18217be9_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,506 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120eae6ad0db24f4291bd6c209a9faca2d0_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120eae6ad0db24f4291bd6c209a9faca2d0_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,507 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ebe08a856e224cefafd29359969263e2_6e74b15d2fcbe758642cc4f5cf813621 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ebe08a856e224cefafd29359969263e2_6e74b15d2fcbe758642cc4f5cf813621 2024-11-20T15:22:15,508 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T15:22:15,510 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=77, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:15,513 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T15:22:15,515 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T15:22:15,516 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=77, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:15,516 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-20T15:22:15,516 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732116135516"}]},"ts":"9223372036854775807"} 2024-11-20T15:22:15,518 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T15:22:15,518 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 6e74b15d2fcbe758642cc4f5cf813621, NAME => 'TestAcidGuarantees,,1732116106610.6e74b15d2fcbe758642cc4f5cf813621.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T15:22:15,518 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T15:22:15,519 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732116135518"}]},"ts":"9223372036854775807"} 2024-11-20T15:22:15,520 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T15:22:15,523 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=77, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:15,524 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 73 msec 2024-11-20T15:22:15,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T15:22:15,554 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-20T15:22:15,570 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=241 (was 238) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_905597479_22 at /127.0.0.1:39884 [Waiting for operation #251] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1160552316_22 at /127.0.0.1:39740 [Waiting for operation #300] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/cluster_20e01121-862a-f127-22c7-564b6246e54a/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/cluster_20e01121-862a-f127-22c7-564b6246e54a/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=453 (was 457), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=401 (was 308) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6877 (was 6299) - AvailableMemoryMB LEAK? - 2024-11-20T15:22:15,582 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=241, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=401, ProcessCount=11, AvailableMemoryMB=6875 2024-11-20T15:22:15,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-20T15:22:15,585 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T15:22:15,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=78, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:15,587 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=78, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T15:22:15,587 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:15,587 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 78 2024-11-20T15:22:15,588 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=78, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T15:22:15,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=78 2024-11-20T15:22:15,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742094_1270 (size=960) 2024-11-20T15:22:15,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=78 2024-11-20T15:22:15,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=78 2024-11-20T15:22:15,999 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3 2024-11-20T15:22:16,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742095_1271 (size=53) 2024-11-20T15:22:16,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=78 2024-11-20T15:22:16,407 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:22:16,407 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 6e158976a217b84d83e0feeee3ad6faf, disabling compactions & flushes 2024-11-20T15:22:16,407 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:16,407 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:16,407 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. after waiting 0 ms 2024-11-20T15:22:16,407 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:16,407 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:16,407 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:16,408 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=78, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T15:22:16,409 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732116136408"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732116136408"}]},"ts":"1732116136408"} 2024-11-20T15:22:16,410 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T15:22:16,411 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=78, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T15:22:16,411 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116136411"}]},"ts":"1732116136411"} 2024-11-20T15:22:16,412 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T15:22:16,416 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=79, ppid=78, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e158976a217b84d83e0feeee3ad6faf, ASSIGN}] 2024-11-20T15:22:16,418 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=79, ppid=78, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e158976a217b84d83e0feeee3ad6faf, ASSIGN 2024-11-20T15:22:16,418 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=79, ppid=78, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e158976a217b84d83e0feeee3ad6faf, ASSIGN; state=OFFLINE, location=0b62285ead89,33387,1732116069954; forceNewPlan=false, retain=false 2024-11-20T15:22:16,569 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=79 updating hbase:meta row=6e158976a217b84d83e0feeee3ad6faf, regionState=OPENING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:22:16,570 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; OpenRegionProcedure 6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:22:16,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=78 2024-11-20T15:22:16,722 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:16,725 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:16,726 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7285): Opening region: {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} 2024-11-20T15:22:16,726 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:16,726 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:22:16,726 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7327): checking encryption for 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:16,726 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7330): checking classloading for 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:16,727 INFO [StoreOpener-6e158976a217b84d83e0feeee3ad6faf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:16,729 INFO [StoreOpener-6e158976a217b84d83e0feeee3ad6faf-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:22:16,729 INFO [StoreOpener-6e158976a217b84d83e0feeee3ad6faf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e158976a217b84d83e0feeee3ad6faf columnFamilyName A 2024-11-20T15:22:16,729 DEBUG [StoreOpener-6e158976a217b84d83e0feeee3ad6faf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:16,729 INFO [StoreOpener-6e158976a217b84d83e0feeee3ad6faf-1 {}] regionserver.HStore(327): Store=6e158976a217b84d83e0feeee3ad6faf/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:22:16,730 INFO [StoreOpener-6e158976a217b84d83e0feeee3ad6faf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:16,731 INFO [StoreOpener-6e158976a217b84d83e0feeee3ad6faf-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:22:16,731 INFO [StoreOpener-6e158976a217b84d83e0feeee3ad6faf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e158976a217b84d83e0feeee3ad6faf columnFamilyName B 2024-11-20T15:22:16,731 DEBUG [StoreOpener-6e158976a217b84d83e0feeee3ad6faf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:16,731 INFO [StoreOpener-6e158976a217b84d83e0feeee3ad6faf-1 {}] regionserver.HStore(327): Store=6e158976a217b84d83e0feeee3ad6faf/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:22:16,731 INFO [StoreOpener-6e158976a217b84d83e0feeee3ad6faf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:16,732 INFO [StoreOpener-6e158976a217b84d83e0feeee3ad6faf-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:22:16,732 INFO [StoreOpener-6e158976a217b84d83e0feeee3ad6faf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e158976a217b84d83e0feeee3ad6faf columnFamilyName C 2024-11-20T15:22:16,732 DEBUG [StoreOpener-6e158976a217b84d83e0feeee3ad6faf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:16,733 INFO [StoreOpener-6e158976a217b84d83e0feeee3ad6faf-1 {}] regionserver.HStore(327): Store=6e158976a217b84d83e0feeee3ad6faf/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:22:16,733 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:16,734 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:16,734 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:16,735 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T15:22:16,736 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1085): writing seq id for 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:16,738 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T15:22:16,739 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1102): Opened 6e158976a217b84d83e0feeee3ad6faf; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61852326, jitterRate=-0.07832852005958557}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T15:22:16,739 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1001): Region open journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:16,740 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., pid=80, masterSystemTime=1732116136722 2024-11-20T15:22:16,742 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:16,742 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:16,742 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=79 updating hbase:meta row=6e158976a217b84d83e0feeee3ad6faf, regionState=OPEN, openSeqNum=2, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:22:16,745 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-20T15:22:16,745 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; OpenRegionProcedure 6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 in 174 msec 2024-11-20T15:22:16,747 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=79, resume processing ppid=78 2024-11-20T15:22:16,747 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, ppid=78, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e158976a217b84d83e0feeee3ad6faf, ASSIGN in 329 msec 2024-11-20T15:22:16,748 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=78, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T15:22:16,748 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116136748"}]},"ts":"1732116136748"} 2024-11-20T15:22:16,749 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T15:22:16,752 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=78, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T15:22:16,754 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1670 sec 2024-11-20T15:22:17,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=78 2024-11-20T15:22:17,693 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 78 completed 2024-11-20T15:22:17,695 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a569490 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c1ac389 2024-11-20T15:22:17,698 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44645c55, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:17,700 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:17,701 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45968, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:17,702 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T15:22:17,703 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50284, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T15:22:17,705 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6862e3ce to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@28e73c0 2024-11-20T15:22:17,708 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64ee0130, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:17,710 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d296fed to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7c480dfb 2024-11-20T15:22:17,713 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683b64c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:17,714 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08d0caa5 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34cb3991 2024-11-20T15:22:17,719 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e55eb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:17,720 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x43f04e0e to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e9ae050 2024-11-20T15:22:17,726 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a703d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:17,727 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x560ec309 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2fef31f8 2024-11-20T15:22:17,730 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14ed1e44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:17,731 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0eb04aeb to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72537a47 2024-11-20T15:22:17,734 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@88aa519, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:17,735 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6a0e9c8f to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36642cb 2024-11-20T15:22:17,738 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e998dd3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:17,743 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d68f787 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c299cfb 2024-11-20T15:22:17,746 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e4c79b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:17,747 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10e6bf6a to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@605827c9 2024-11-20T15:22:17,756 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d1403c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:17,757 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1730a60f to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3677bd4f 2024-11-20T15:22:17,760 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bf0ba59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:17,762 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:17,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-20T15:22:17,764 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:17,764 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-11-20T15:22:17,765 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:17,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T15:22:17,768 DEBUG [hconnection-0x40c6f62b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:17,768 DEBUG [hconnection-0x52f7c385-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:17,768 DEBUG [hconnection-0x68a2ff28-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:17,769 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45982, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:17,769 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45992, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:17,769 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45990, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:17,770 DEBUG [hconnection-0x26ae793d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:17,770 DEBUG [hconnection-0x469decd5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:17,771 DEBUG [hconnection-0x4b02cfaa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:17,771 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46004, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:17,771 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46012, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:17,772 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46020, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:17,781 DEBUG [hconnection-0x606487b4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:17,783 DEBUG [hconnection-0x43b46f20-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:17,784 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46028, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:17,784 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46040, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:17,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 
6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:17,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T15:22:17,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:17,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:17,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:17,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:17,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:17,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:17,788 DEBUG [hconnection-0x80fa607-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:17,789 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46052, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:17,794 DEBUG [hconnection-0x5c24314-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:17,796 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46056, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:17,815 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/a2e64598dee34328993d86f469d7f50a is 50, key is test_row_0/A:col10/1732116137784/Put/seqid=0 2024-11-20T15:22:17,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:17,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116197827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:17,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:17,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116197832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:17,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:17,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116197835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:17,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:17,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116197839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:17,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:17,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116197839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:17,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T15:22:17,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742096_1272 (size=12001) 2024-11-20T15:22:17,916 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:17,917 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T15:22:17,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:17,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:17,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:17,918 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:17,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:17,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:17,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:17,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:17,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116197940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:17,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116197940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:17,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:17,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116197940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:17,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:17,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116197943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:17,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:17,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116197946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T15:22:18,070 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,070 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T15:22:18,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:18,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:18,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:18,071 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:18,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:18,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:18,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:18,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116198143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:18,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116198144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:18,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116198145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:18,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116198147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:18,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116198149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,223 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T15:22:18,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:18,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:18,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:18,224 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
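The records above all revolve around flush procedure pid=82: the master's RSProcedureDispatcher keeps re-sending FlushRegionCallable to 0b62285ead89,33387, the region server answers "NOT flushing ... as already flushing", the callable fails with "Unable to complete flush", and the master logs "Remote procedure failed, pid=82" before dispatching it again. A table flush request through the client Admin API is one way such a procedure gets created; the sketch below is illustrative only (it is not part of this test), and the class name is an assumption.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical example class, not taken from TestAcidGuarantees.
public class FlushRequestExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the cluster to flush the table's regions. On the server side this turns into
      // a flush-region procedure like pid=82 in the log; when a region is already in the
      // middle of a flush, the region server reports it cannot complete the request and
      // the master keeps retrying the remote procedure until the in-flight flush finishes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```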
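The interleaved WARN/DEBUG records show the other side of the same backlog: while the memstore of region 6e158976a217b84d83e0feeee3ad6faf is over its (test-sized) 512.0 K limit, HRegion.checkResources rejects incoming Mutate calls with RegionTooBusyException, and each rejected callId is logged by CallRunner. The sketch below shows one way a writer could back off and retry under that backpressure; it is a minimal illustration, not the test's own client code, and the class name, attempt count, and sleep values are assumptions. In a normal deployment the blocking threshold is typically derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier rather than the small limit used by this test.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical example class, not taken from TestAcidGuarantees.
public class BackoffPutExample {
  private static final int MAX_ATTEMPTS = 5;      // assumption: tune for the workload
  private static final long BASE_SLEEP_MS = 100;  // assumption: initial backoff

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier mirror the keys visible in the log (test_row_0, family A, col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long sleep = BASE_SLEEP_MS;
      for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
        try {
          table.put(put);
          break; // write accepted once the region is no longer over its memstore limit
        } catch (IOException e) {
          // RegionTooBusyException extends IOException; the client usually retries it
          // internally first, so when put() does throw, the busy region may appear in
          // the cause chain rather than as the top-level exception.
          if (attempt == MAX_ATTEMPTS) {
            throw e;              // give up after the final attempt
          }
          Thread.sleep(sleep);    // simple exponential backoff before retrying
          sleep *= 2;
        }
      }
    }
  }
}
```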
2024-11-20T15:22:18,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:18,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:18,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/a2e64598dee34328993d86f469d7f50a 2024-11-20T15:22:18,312 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/ef132a4a4784415fae5d419afc323c31 is 50, key is test_row_0/B:col10/1732116137784/Put/seqid=0 2024-11-20T15:22:18,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742097_1273 (size=12001) 2024-11-20T15:22:18,350 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/ef132a4a4784415fae5d419afc323c31 2024-11-20T15:22:18,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T15:22:18,378 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T15:22:18,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:18,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:18,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:18,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:18,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:18,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/7b1a7daa31a5447098fcb91313ef1884 is 50, key is test_row_0/C:col10/1732116137784/Put/seqid=0 2024-11-20T15:22:18,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:18,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742098_1274 (size=12001) 2024-11-20T15:22:18,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:18,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116198447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:18,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116198448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:18,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116198448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:18,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116198449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:18,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116198452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,534 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T15:22:18,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:18,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:18,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:18,535 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:18,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:18,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:18,688 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T15:22:18,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:18,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:18,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:18,689 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:18,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:18,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:18,798 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/7b1a7daa31a5447098fcb91313ef1884 2024-11-20T15:22:18,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/a2e64598dee34328993d86f469d7f50a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a2e64598dee34328993d86f469d7f50a 2024-11-20T15:22:18,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a2e64598dee34328993d86f469d7f50a, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T15:22:18,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/ef132a4a4784415fae5d419afc323c31 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/ef132a4a4784415fae5d419afc323c31 2024-11-20T15:22:18,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/ef132a4a4784415fae5d419afc323c31, entries=150, sequenceid=13, 
filesize=11.7 K 2024-11-20T15:22:18,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/7b1a7daa31a5447098fcb91313ef1884 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/7b1a7daa31a5447098fcb91313ef1884 2024-11-20T15:22:18,820 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/7b1a7daa31a5447098fcb91313ef1884, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T15:22:18,821 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6e158976a217b84d83e0feeee3ad6faf in 1035ms, sequenceid=13, compaction requested=false 2024-11-20T15:22:18,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:18,847 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T15:22:18,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:18,848 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T15:22:18,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:18,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:18,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:18,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:18,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:18,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:18,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/27c54bc1fee5481bb0989c062ef351cb is 50, key is test_row_0/A:col10/1732116137816/Put/seqid=0 2024-11-20T15:22:18,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742099_1275 (size=12001) 2024-11-20T15:22:18,869 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/27c54bc1fee5481bb0989c062ef351cb 2024-11-20T15:22:18,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T15:22:18,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/48553e6cb8484825b80e1dfb740d517e is 50, key is test_row_0/B:col10/1732116137816/Put/seqid=0 2024-11-20T15:22:18,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742100_1276 (size=12001) 2024-11-20T15:22:18,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:18,954 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:18,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:18,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116198962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:18,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116198963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116198965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:18,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116198965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:18,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:18,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116198966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:19,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:19,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116199066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:19,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:19,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116199067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:19,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:19,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116199068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:19,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:19,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116199070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:19,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:19,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116199071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:19,180 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T15:22:19,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:19,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116199269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:19,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:19,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116199270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:19,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:19,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116199272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:19,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:19,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116199272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:19,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:19,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116199273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:19,289 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/48553e6cb8484825b80e1dfb740d517e 2024-11-20T15:22:19,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/1bcaca71aaa14b9baffabaea4bdcd4cb is 50, key is test_row_0/C:col10/1732116137816/Put/seqid=0 2024-11-20T15:22:19,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742101_1277 (size=12001) 2024-11-20T15:22:19,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:19,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116199573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:19,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:19,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116199574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:19,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:19,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116199576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:19,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:19,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116199576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:19,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:19,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116199577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:19,745 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/1bcaca71aaa14b9baffabaea4bdcd4cb 2024-11-20T15:22:19,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/27c54bc1fee5481bb0989c062ef351cb as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/27c54bc1fee5481bb0989c062ef351cb 2024-11-20T15:22:19,760 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/27c54bc1fee5481bb0989c062ef351cb, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T15:22:19,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/48553e6cb8484825b80e1dfb740d517e as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/48553e6cb8484825b80e1dfb740d517e 2024-11-20T15:22:19,766 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/48553e6cb8484825b80e1dfb740d517e, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T15:22:19,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/1bcaca71aaa14b9baffabaea4bdcd4cb as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1bcaca71aaa14b9baffabaea4bdcd4cb 2024-11-20T15:22:19,772 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1bcaca71aaa14b9baffabaea4bdcd4cb, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T15:22:19,773 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=73.80 KB/75570 for 6e158976a217b84d83e0feeee3ad6faf in 925ms, sequenceid=37, compaction requested=false 2024-11-20T15:22:19,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:19,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:19,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-20T15:22:19,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-20T15:22:19,776 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-20T15:22:19,776 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0100 sec 2024-11-20T15:22:19,777 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 2.0140 sec 2024-11-20T15:22:19,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T15:22:19,871 INFO [Thread-1245 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-20T15:22:19,872 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:19,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-20T15:22:19,874 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:19,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T15:22:19,875 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:19,875 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:19,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T15:22:20,028 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,029 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T15:22:20,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:20,029 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T15:22:20,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:20,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:20,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:20,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:20,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:20,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:20,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/d881268b5b62439fa1036f81b2457195 is 50, key is test_row_0/A:col10/1732116138963/Put/seqid=0 2024-11-20T15:22:20,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742102_1278 (size=12001) 2024-11-20T15:22:20,057 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/d881268b5b62439fa1036f81b2457195 2024-11-20T15:22:20,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/e52fa97cd32d4c65bfe9d854b5241592 is 50, key is test_row_0/B:col10/1732116138963/Put/seqid=0 2024-11-20T15:22:20,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742103_1279 (size=12001) 2024-11-20T15:22:20,082 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), 
to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/e52fa97cd32d4c65bfe9d854b5241592 2024-11-20T15:22:20,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:20,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:20,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/7e18209cbc244cae9d30e0cfe439a6da is 50, key is test_row_0/C:col10/1732116138963/Put/seqid=0 2024-11-20T15:22:20,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116200118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116200117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116200119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116200122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116200122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742104_1280 (size=12001) 2024-11-20T15:22:20,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T15:22:20,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116200226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116200226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116200226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116200227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116200227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116200428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116200429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116200429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116200429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116200430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T15:22:20,547 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/7e18209cbc244cae9d30e0cfe439a6da 2024-11-20T15:22:20,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/d881268b5b62439fa1036f81b2457195 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d881268b5b62439fa1036f81b2457195 2024-11-20T15:22:20,558 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d881268b5b62439fa1036f81b2457195, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T15:22:20,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/e52fa97cd32d4c65bfe9d854b5241592 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/e52fa97cd32d4c65bfe9d854b5241592 2024-11-20T15:22:20,563 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/e52fa97cd32d4c65bfe9d854b5241592, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T15:22:20,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/7e18209cbc244cae9d30e0cfe439a6da as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/7e18209cbc244cae9d30e0cfe439a6da 2024-11-20T15:22:20,568 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/7e18209cbc244cae9d30e0cfe439a6da, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T15:22:20,569 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 6e158976a217b84d83e0feeee3ad6faf in 540ms, sequenceid=51, compaction requested=true 2024-11-20T15:22:20,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:20,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:20,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-20T15:22:20,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-20T15:22:20,572 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-20T15:22:20,572 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 696 msec 2024-11-20T15:22:20,574 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 701 msec 2024-11-20T15:22:20,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:20,735 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T15:22:20,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:20,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:20,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:20,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:20,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:20,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:20,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/75e3eb8ba0eb4fe3872b38117c2fa6e4 is 50, key is test_row_0/A:col10/1732116140735/Put/seqid=0 2024-11-20T15:22:20,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116200744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116200745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116200745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116200746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116200747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742105_1281 (size=12001) 2024-11-20T15:22:20,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116200848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116200849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116200850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116200850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:20,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116200850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:20,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T15:22:20,979 INFO [Thread-1245 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-20T15:22:20,980 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:20,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-20T15:22:20,982 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:20,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T15:22:20,983 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:20,983 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:21,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116201051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116201053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116201054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116201054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116201057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T15:22:21,135 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,136 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T15:22:21,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:21,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:21,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:21,136 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:21,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:21,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:21,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/75e3eb8ba0eb4fe3872b38117c2fa6e4 2024-11-20T15:22:21,169 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/3cb5856de83643e085ee3ed70f4ec069 is 50, key is test_row_0/B:col10/1732116140735/Put/seqid=0 2024-11-20T15:22:21,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742106_1282 (size=12001) 2024-11-20T15:22:21,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T15:22:21,293 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T15:22:21,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:21,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:21,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:21,294 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:21,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:21,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:21,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116201355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116201357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116201357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,360 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116201357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116201364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,446 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T15:22:21,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:21,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:21,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:21,448 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:21,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:21,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:21,575 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/3cb5856de83643e085ee3ed70f4ec069 2024-11-20T15:22:21,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T15:22:21,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/d87f8ebd722a49819de1abd679adaf41 is 50, key is test_row_0/C:col10/1732116140735/Put/seqid=0 2024-11-20T15:22:21,600 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T15:22:21,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:21,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:21,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:21,601 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:21,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:21,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:21,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742107_1283 (size=12001) 2024-11-20T15:22:21,647 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/d87f8ebd722a49819de1abd679adaf41 2024-11-20T15:22:21,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/75e3eb8ba0eb4fe3872b38117c2fa6e4 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/75e3eb8ba0eb4fe3872b38117c2fa6e4 2024-11-20T15:22:21,660 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/75e3eb8ba0eb4fe3872b38117c2fa6e4, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T15:22:21,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/3cb5856de83643e085ee3ed70f4ec069 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3cb5856de83643e085ee3ed70f4ec069 2024-11-20T15:22:21,667 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3cb5856de83643e085ee3ed70f4ec069, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T15:22:21,668 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/d87f8ebd722a49819de1abd679adaf41 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/d87f8ebd722a49819de1abd679adaf41 2024-11-20T15:22:21,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/d87f8ebd722a49819de1abd679adaf41, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T15:22:21,673 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 6e158976a217b84d83e0feeee3ad6faf in 939ms, sequenceid=76, compaction requested=true 2024-11-20T15:22:21,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:21,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:21,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:21,674 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:21,674 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:21,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:21,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:21,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:21,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:21,676 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:21,676 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/A is initiating minor compaction (all files) 2024-11-20T15:22:21,676 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/A in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:21,676 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a2e64598dee34328993d86f469d7f50a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/27c54bc1fee5481bb0989c062ef351cb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d881268b5b62439fa1036f81b2457195, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/75e3eb8ba0eb4fe3872b38117c2fa6e4] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=46.9 K 2024-11-20T15:22:21,676 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:21,676 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2e64598dee34328993d86f469d7f50a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732116137778 2024-11-20T15:22:21,676 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/B is initiating minor compaction (all files) 2024-11-20T15:22:21,676 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/B in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:21,677 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/ef132a4a4784415fae5d419afc323c31, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/48553e6cb8484825b80e1dfb740d517e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/e52fa97cd32d4c65bfe9d854b5241592, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3cb5856de83643e085ee3ed70f4ec069] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=46.9 K 2024-11-20T15:22:21,677 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting ef132a4a4784415fae5d419afc323c31, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732116137778 2024-11-20T15:22:21,677 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27c54bc1fee5481bb0989c062ef351cb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732116137816 2024-11-20T15:22:21,678 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d881268b5b62439fa1036f81b2457195, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732116138961 2024-11-20T15:22:21,678 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 48553e6cb8484825b80e1dfb740d517e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732116137816 2024-11-20T15:22:21,678 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75e3eb8ba0eb4fe3872b38117c2fa6e4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732116140118 2024-11-20T15:22:21,678 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting e52fa97cd32d4c65bfe9d854b5241592, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732116138961 2024-11-20T15:22:21,679 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cb5856de83643e085ee3ed70f4ec069, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732116140118 2024-11-20T15:22:21,699 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#A#compaction#237 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:21,700 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/a6851546ee864741a2f6077bc36ea4dc is 50, key is test_row_0/A:col10/1732116140735/Put/seqid=0 2024-11-20T15:22:21,706 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#B#compaction#238 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:21,707 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/3e394429d12246779e81eb1708e32fa5 is 50, key is test_row_0/B:col10/1732116140735/Put/seqid=0 2024-11-20T15:22:21,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742108_1284 (size=12139) 2024-11-20T15:22:21,752 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/a6851546ee864741a2f6077bc36ea4dc as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a6851546ee864741a2f6077bc36ea4dc 2024-11-20T15:22:21,753 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T15:22:21,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:21,754 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:22:21,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:21,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:21,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:21,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:21,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:21,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:21,760 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/A of 6e158976a217b84d83e0feeee3ad6faf into a6851546ee864741a2f6077bc36ea4dc(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
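
The CompactingMemStore(205) "FLUSHING TO DISK ... store=A/B/C" lines show that all three stores sit behind CompactingMemStore, i.e. in-memory compaction is enabled for them. As an illustration only (the BASIC policy and the descriptor-based setup below are assumptions, not read from this run; the test could equally enable it through cluster-wide configuration, and the class name is made up), a table with families A, B and C could be declared that way like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuaranteesTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptorBuilder table =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
            // Family names A, B, C match the store names in the log; the policy is assumed.
            for (String family : new String[] {"A", "B", "C"}) {
                table.setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                        // Any policy other than NONE selects CompactingMemStore for the store.
                        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                        .build());
            }
            admin.createTable(table.build());
        }
    }
}
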
2024-11-20T15:22:21,760 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:21,760 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/A, priority=12, startTime=1732116141674; duration=0sec 2024-11-20T15:22:21,760 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:21,760 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:A 2024-11-20T15:22:21,760 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:21,762 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:21,762 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/C is initiating minor compaction (all files) 2024-11-20T15:22:21,762 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/C in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:21,762 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/7b1a7daa31a5447098fcb91313ef1884, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1bcaca71aaa14b9baffabaea4bdcd4cb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/7e18209cbc244cae9d30e0cfe439a6da, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/d87f8ebd722a49819de1abd679adaf41] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=46.9 K 2024-11-20T15:22:21,762 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b1a7daa31a5447098fcb91313ef1884, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732116137778 2024-11-20T15:22:21,763 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bcaca71aaa14b9baffabaea4bdcd4cb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732116137816 2024-11-20T15:22:21,763 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e18209cbc244cae9d30e0cfe439a6da, keycount=150, bloomtype=ROW, size=11.7 K, 
encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732116138961 2024-11-20T15:22:21,764 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d87f8ebd722a49819de1abd679adaf41, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732116140118 2024-11-20T15:22:21,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/967f937339d6420cbbf8886dc6c6cbf3 is 50, key is test_row_0/A:col10/1732116140745/Put/seqid=0 2024-11-20T15:22:21,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742109_1285 (size=12139) 2024-11-20T15:22:21,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742110_1286 (size=12001) 2024-11-20T15:22:21,792 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/967f937339d6420cbbf8886dc6c6cbf3 2024-11-20T15:22:21,804 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#C#compaction#240 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:21,805 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/034b3ed523064665b7b76f8ed2814839 is 50, key is test_row_0/C:col10/1732116140735/Put/seqid=0 2024-11-20T15:22:21,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/5763d7236ae04e5a9e2669027d0e7876 is 50, key is test_row_0/B:col10/1732116140745/Put/seqid=0 2024-11-20T15:22:21,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742111_1287 (size=12139) 2024-11-20T15:22:21,839 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/034b3ed523064665b7b76f8ed2814839 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/034b3ed523064665b7b76f8ed2814839 2024-11-20T15:22:21,846 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/C of 6e158976a217b84d83e0feeee3ad6faf into 034b3ed523064665b7b76f8ed2814839(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:21,846 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:21,846 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/C, priority=12, startTime=1732116141674; duration=0sec 2024-11-20T15:22:21,846 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:21,846 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:C 2024-11-20T15:22:21,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742112_1288 (size=12001) 2024-11-20T15:22:21,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:21,866 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:21,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116201882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116201882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116201883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116201885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116201885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116201992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116201992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116201992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116201992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:21,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:21,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116201992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:22,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T15:22:22,179 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/3e394429d12246779e81eb1708e32fa5 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3e394429d12246779e81eb1708e32fa5 2024-11-20T15:22:22,186 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/B of 6e158976a217b84d83e0feeee3ad6faf into 3e394429d12246779e81eb1708e32fa5(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:22,187 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:22,187 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/B, priority=12, startTime=1732116141674; duration=0sec 2024-11-20T15:22:22,187 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:22,187 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:B 2024-11-20T15:22:22,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:22,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:22,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116202195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:22,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116202195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:22,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:22,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116202196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:22,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:22,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116202197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:22,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:22,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116202204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:22,248 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/5763d7236ae04e5a9e2669027d0e7876 2024-11-20T15:22:22,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/024d7cc4047f434d90b18707b20a9fdf is 50, key is test_row_0/C:col10/1732116140745/Put/seqid=0 2024-11-20T15:22:22,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742113_1289 (size=12001) 2024-11-20T15:22:22,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:22,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116202498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:22,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:22,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116202500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:22,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:22,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116202500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:22,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:22,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116202502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:22,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:22,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116202508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:22,671 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/024d7cc4047f434d90b18707b20a9fdf 2024-11-20T15:22:22,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/967f937339d6420cbbf8886dc6c6cbf3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/967f937339d6420cbbf8886dc6c6cbf3 2024-11-20T15:22:22,685 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/967f937339d6420cbbf8886dc6c6cbf3, entries=150, sequenceid=87, filesize=11.7 K 2024-11-20T15:22:22,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/5763d7236ae04e5a9e2669027d0e7876 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/5763d7236ae04e5a9e2669027d0e7876 2024-11-20T15:22:22,710 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/5763d7236ae04e5a9e2669027d0e7876, entries=150, sequenceid=87, filesize=11.7 K 2024-11-20T15:22:22,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/024d7cc4047f434d90b18707b20a9fdf as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/024d7cc4047f434d90b18707b20a9fdf 2024-11-20T15:22:22,719 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/024d7cc4047f434d90b18707b20a9fdf, entries=150, sequenceid=87, filesize=11.7 K 2024-11-20T15:22:22,720 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 6e158976a217b84d83e0feeee3ad6faf in 966ms, sequenceid=87, compaction requested=false 2024-11-20T15:22:22,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:22,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
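
Most of the surrounding noise is the same RegionTooBusyException: while this flush drains the memstore, new mutations are rejected with "Over memstore limit=512.0 K". That blocking threshold is the region flush size multiplied by the blocking multiplier, so a limit this small points at deliberately tiny test settings. The sketch below names the two standard properties involved; the concrete values are assumptions chosen only to reproduce 512 K, and the class name is made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemStoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Writes to a region are rejected with RegionTooBusyException once its memstore
        // exceeds flush size * blocking multiplier. Assumed values reproducing 512 K:
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush at 128 K (assumed)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block at 4 x 128 K
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking limit bytes = " + blockingLimit); // 524288 = 512.0 K
    }
}

The client side treats RegionTooBusyException as retryable, which is why the same connections (172.17.0.2:45990, :46028, :46040, :45992, :46052) keep reappearing above with increasing callIds and later deadlines until the flush finishes.
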
2024-11-20T15:22:22,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-20T15:22:22,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-20T15:22:22,723 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-20T15:22:22,723 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7390 sec 2024-11-20T15:22:22,724 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.7440 sec 2024-11-20T15:22:23,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:23,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T15:22:23,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:23,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:23,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:23,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:23,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:23,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:23,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/5eb22de5fefb4ab9a583e5f1b7b37315 is 50, key is test_row_0/A:col10/1732116141883/Put/seqid=0 2024-11-20T15:22:23,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116203010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116203011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116203012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116203012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116203012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742114_1290 (size=12001) 2024-11-20T15:22:23,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T15:22:23,088 INFO [Thread-1245 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-20T15:22:23,089 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:23,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-20T15:22:23,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T15:22:23,091 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:23,092 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:23,092 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:23,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116203113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116203113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116203113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116203113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T15:22:23,245 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-20T15:22:23,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:23,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:23,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:23,246 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:23,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:23,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:23,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116203316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116203317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116203317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116203317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T15:22:23,399 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-20T15:22:23,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:23,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:23,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:23,400 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:23,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:23,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:23,418 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/5eb22de5fefb4ab9a583e5f1b7b37315 2024-11-20T15:22:23,426 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/a88ea9dec5fa4d86aac4c5f41af577a6 is 50, key is test_row_0/B:col10/1732116141883/Put/seqid=0 2024-11-20T15:22:23,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742115_1291 (size=12001) 2024-11-20T15:22:23,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/a88ea9dec5fa4d86aac4c5f41af577a6 2024-11-20T15:22:23,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/a0fa50127d4642adbfaec422ed4de404 is 50, key is test_row_0/C:col10/1732116141883/Put/seqid=0 2024-11-20T15:22:23,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742116_1292 (size=12001) 2024-11-20T15:22:23,552 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-20T15:22:23,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:23,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
as already flushing 2024-11-20T15:22:23,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:23,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:23,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:23,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:23,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116203621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116203621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116203622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:23,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116203622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T15:22:23,705 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-20T15:22:23,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:23,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:23,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:23,706 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:23,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:23,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:23,846 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/a0fa50127d4642adbfaec422ed4de404 2024-11-20T15:22:23,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/5eb22de5fefb4ab9a583e5f1b7b37315 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/5eb22de5fefb4ab9a583e5f1b7b37315 2024-11-20T15:22:23,858 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:23,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/5eb22de5fefb4ab9a583e5f1b7b37315, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T15:22:23,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-20T15:22:23,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:23,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:23,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:23,859 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:23,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:23,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:23,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/a88ea9dec5fa4d86aac4c5f41af577a6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/a88ea9dec5fa4d86aac4c5f41af577a6 2024-11-20T15:22:23,879 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/a88ea9dec5fa4d86aac4c5f41af577a6, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T15:22:23,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/a0fa50127d4642adbfaec422ed4de404 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a0fa50127d4642adbfaec422ed4de404 2024-11-20T15:22:23,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a0fa50127d4642adbfaec422ed4de404, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T15:22:23,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 6e158976a217b84d83e0feeee3ad6faf in 883ms, sequenceid=117, compaction requested=true 2024-11-20T15:22:23,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:23,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:23,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:23,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:23,888 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
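After the flush at sequenceid=117 lands, each store holds three HFiles of roughly 11.7-11.9 K, so MemStoreFlusher queues compaction for stores A, B and (just below) C, and the ExploringCompactionPolicy selects all three eligible files per store for a minor compaction. The same state can be observed, or a compaction requested, from the Admin API; a minimal sketch, assuming an already-open Connection and a hypothetical helper class name:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;

    public final class CompactionProbe {
      /** Queues a minor compaction for every region of the table and reports the current state. */
      static CompactionState requestAndProbe(Connection conn, String tableName) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf(tableName);
          admin.compact(table);                     // asynchronous: region servers pick the work up
          return admin.getCompactionState(table);   // NONE, MINOR, MAJOR or MAJOR_AND_MINOR
        }
      }
    }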
2024-11-20T15:22:23,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:23,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:23,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T15:22:23,889 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:23,890 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:23,890 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/A is initiating minor compaction (all files) 2024-11-20T15:22:23,890 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/A in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:23,890 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a6851546ee864741a2f6077bc36ea4dc, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/967f937339d6420cbbf8886dc6c6cbf3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/5eb22de5fefb4ab9a583e5f1b7b37315] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=35.3 K 2024-11-20T15:22:23,890 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:23,891 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/B is initiating minor compaction (all files) 2024-11-20T15:22:23,891 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/B in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:23,891 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3e394429d12246779e81eb1708e32fa5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/5763d7236ae04e5a9e2669027d0e7876, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/a88ea9dec5fa4d86aac4c5f41af577a6] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=35.3 K 2024-11-20T15:22:23,891 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6851546ee864741a2f6077bc36ea4dc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732116140118 2024-11-20T15:22:23,891 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 967f937339d6420cbbf8886dc6c6cbf3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732116140739 2024-11-20T15:22:23,891 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e394429d12246779e81eb1708e32fa5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732116140118 2024-11-20T15:22:23,892 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 5763d7236ae04e5a9e2669027d0e7876, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732116140739 2024-11-20T15:22:23,892 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5eb22de5fefb4ab9a583e5f1b7b37315, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732116141883 2024-11-20T15:22:23,892 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting a88ea9dec5fa4d86aac4c5f41af577a6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732116141883 2024-11-20T15:22:23,908 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#A#compaction#246 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:23,908 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/853c5a73d6dc442f823675425a4f25f6 is 50, key is test_row_0/A:col10/1732116141883/Put/seqid=0 2024-11-20T15:22:23,915 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#B#compaction#247 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:23,916 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/9cd5f892e47640db8cfa737f3e269917 is 50, key is test_row_0/B:col10/1732116141883/Put/seqid=0 2024-11-20T15:22:23,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742117_1293 (size=12241) 2024-11-20T15:22:23,949 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/853c5a73d6dc442f823675425a4f25f6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/853c5a73d6dc442f823675425a4f25f6 2024-11-20T15:22:23,956 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/A of 6e158976a217b84d83e0feeee3ad6faf into 853c5a73d6dc442f823675425a4f25f6(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:23,956 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:23,956 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/A, priority=13, startTime=1732116143888; duration=0sec 2024-11-20T15:22:23,956 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:23,956 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:A 2024-11-20T15:22:23,957 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:23,958 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:23,958 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/C is initiating minor compaction (all files) 2024-11-20T15:22:23,958 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/C in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:23,958 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/034b3ed523064665b7b76f8ed2814839, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/024d7cc4047f434d90b18707b20a9fdf, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a0fa50127d4642adbfaec422ed4de404] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=35.3 K 2024-11-20T15:22:23,959 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 034b3ed523064665b7b76f8ed2814839, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732116140118 2024-11-20T15:22:23,959 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 024d7cc4047f434d90b18707b20a9fdf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732116140739 2024-11-20T15:22:23,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742118_1294 (size=12241) 2024-11-20T15:22:23,960 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting a0fa50127d4642adbfaec422ed4de404, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732116141883 2024-11-20T15:22:23,973 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/9cd5f892e47640db8cfa737f3e269917 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/9cd5f892e47640db8cfa737f3e269917 2024-11-20T15:22:23,976 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#C#compaction#248 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:23,977 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/43598885ecd1487b9c1a1cd09373f7bb is 50, key is test_row_0/C:col10/1732116141883/Put/seqid=0 2024-11-20T15:22:23,981 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/B of 6e158976a217b84d83e0feeee3ad6faf into 9cd5f892e47640db8cfa737f3e269917(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
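The compactions above run under PressureAwareThroughputController, which here reports an effective limit of 50.00 MB/second because there is no flush pressure. A different ceiling can be set in the Configuration used to start the region servers (or the mini-cluster in a test like this one); a minimal sketch, assuming the hbase.hstore.compaction.throughput.* keys read by the pressure-aware compaction throughput controller, with illustrative values rather than defaults:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CompactionThroughputConfig {
      /** Returns a Configuration with compaction throughput bounds set (bytes per second). */
      static Configuration withCompactionLimits() {
        Configuration conf = HBaseConfiguration.create();
        // The controller interpolates between these bounds based on flush pressure.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound",   50L * 1024 * 1024);
        return conf;
      }
    }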
2024-11-20T15:22:23,981 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:23,981 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/B, priority=13, startTime=1732116143888; duration=0sec 2024-11-20T15:22:23,981 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:23,981 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:B 2024-11-20T15:22:23,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742119_1295 (size=12241) 2024-11-20T15:22:24,007 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/43598885ecd1487b9c1a1cd09373f7bb as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/43598885ecd1487b9c1a1cd09373f7bb 2024-11-20T15:22:24,012 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:24,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-20T15:22:24,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
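This third dispatch of pid=88 is the one that finally does the work: the region is no longer mid-flush, so FlushRegionCallable proceeds and (below) flushes the remaining ~46.96 KB across the three column families. The whole exchange, a master-side FlushTableProcedure (pid=87) fanning out a per-region FlushRegionProcedure (pid=88) to the region server, is what a single Admin.flush call triggers in this build; a minimal sketch, assuming an open Connection and a hypothetical helper name:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public final class FlushTableExample {
      /** Asks the master to flush every region of the table; memstore contents become new HFiles. */
      static void flushTable(Connection conn, String tableName) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf(tableName));
        }
      }
    }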
2024-11-20T15:22:24,013 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T15:22:24,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:24,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:24,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:24,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:24,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:24,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:24,014 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/C of 6e158976a217b84d83e0feeee3ad6faf into 43598885ecd1487b9c1a1cd09373f7bb(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:24,014 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:24,014 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/C, priority=13, startTime=1732116143888; duration=0sec 2024-11-20T15:22:24,015 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:24,015 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:C 2024-11-20T15:22:24,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/2e82b92e4bf543dba7cbcceac91203e3 is 50, key is test_row_0/A:col10/1732116143009/Put/seqid=0 2024-11-20T15:22:24,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:24,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
as already flushing 2024-11-20T15:22:24,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742120_1296 (size=12001) 2024-11-20T15:22:24,039 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/2e82b92e4bf543dba7cbcceac91203e3 2024-11-20T15:22:24,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/22f02b05372a481fb5e2cdf0425d3975 is 50, key is test_row_0/B:col10/1732116143009/Put/seqid=0 2024-11-20T15:22:24,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742121_1297 (size=12001) 2024-11-20T15:22:24,073 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/22f02b05372a481fb5e2cdf0425d3975 2024-11-20T15:22:24,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/a2275c827a5940ea9ecdc22d62c606e4 is 50, key is test_row_0/C:col10/1732116143009/Put/seqid=0 2024-11-20T15:22:24,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742122_1298 (size=12001) 2024-11-20T15:22:24,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:24,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116204103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:24,127 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:24,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116204126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:24,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:24,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116204127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:24,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:24,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116204128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:24,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:24,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116204128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:24,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T15:22:24,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:24,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116204205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:24,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116204408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:24,490 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/a2275c827a5940ea9ecdc22d62c606e4 2024-11-20T15:22:24,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/2e82b92e4bf543dba7cbcceac91203e3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/2e82b92e4bf543dba7cbcceac91203e3 2024-11-20T15:22:24,502 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/2e82b92e4bf543dba7cbcceac91203e3, entries=150, sequenceid=129, filesize=11.7 K 2024-11-20T15:22:24,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/22f02b05372a481fb5e2cdf0425d3975 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/22f02b05372a481fb5e2cdf0425d3975 2024-11-20T15:22:24,506 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/22f02b05372a481fb5e2cdf0425d3975, entries=150, sequenceid=129, filesize=11.7 K 2024-11-20T15:22:24,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/a2275c827a5940ea9ecdc22d62c606e4 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a2275c827a5940ea9ecdc22d62c606e4 2024-11-20T15:22:24,511 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a2275c827a5940ea9ecdc22d62c606e4, entries=150, sequenceid=129, filesize=11.7 K 2024-11-20T15:22:24,512 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 6e158976a217b84d83e0feeee3ad6faf in 499ms, sequenceid=129, compaction requested=false 2024-11-20T15:22:24,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:24,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:24,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-20T15:22:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-20T15:22:24,515 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-20T15:22:24,515 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4220 sec 2024-11-20T15:22:24,516 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.4260 sec 2024-11-20T15:22:24,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:24,715 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T15:22:24,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:24,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:24,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:24,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:24,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:24,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:24,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/9b989a85e28d4598882edb4e546427de is 50, key is test_row_0/A:col10/1732116144098/Put/seqid=0 2024-11-20T15:22:24,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116204729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:24,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742123_1299 (size=12151) 2024-11-20T15:22:24,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:24,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116204832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:25,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:25,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116205035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:25,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:25,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116205129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:25,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:25,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:25,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116205136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:25,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116205136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:25,138 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/9b989a85e28d4598882edb4e546427de 2024-11-20T15:22:25,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:25,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116205138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:25,147 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/108f0a8a384e46f98b37cb6018a0fdcb is 50, key is test_row_0/B:col10/1732116144098/Put/seqid=0 2024-11-20T15:22:25,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742124_1300 (size=12151) 2024-11-20T15:22:25,168 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/108f0a8a384e46f98b37cb6018a0fdcb 2024-11-20T15:22:25,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/22375681ecf24b4781eb19ad0722e75f is 50, key is test_row_0/C:col10/1732116144098/Put/seqid=0 2024-11-20T15:22:25,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742125_1301 (size=12151) 2024-11-20T15:22:25,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T15:22:25,195 INFO [Thread-1245 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-20T15:22:25,196 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:25,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-11-20T15:22:25,197 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:25,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=89 2024-11-20T15:22:25,198 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:25,198 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:25,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T15:22:25,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:25,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116205339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:25,350 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:25,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-20T15:22:25,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:25,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
as already flushing 2024-11-20T15:22:25,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:25,351 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:25,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:25,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:25,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T15:22:25,503 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:25,504 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-20T15:22:25,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:25,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:25,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:25,504 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:25,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:25,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:25,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/22375681ecf24b4781eb19ad0722e75f 2024-11-20T15:22:25,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/9b989a85e28d4598882edb4e546427de as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/9b989a85e28d4598882edb4e546427de 2024-11-20T15:22:25,603 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/9b989a85e28d4598882edb4e546427de, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T15:22:25,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/108f0a8a384e46f98b37cb6018a0fdcb as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/108f0a8a384e46f98b37cb6018a0fdcb 2024-11-20T15:22:25,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/108f0a8a384e46f98b37cb6018a0fdcb, entries=150, 
sequenceid=157, filesize=11.9 K 2024-11-20T15:22:25,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/22375681ecf24b4781eb19ad0722e75f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/22375681ecf24b4781eb19ad0722e75f 2024-11-20T15:22:25,616 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/22375681ecf24b4781eb19ad0722e75f, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T15:22:25,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 6e158976a217b84d83e0feeee3ad6faf in 902ms, sequenceid=157, compaction requested=true 2024-11-20T15:22:25,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:25,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:25,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:25,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:25,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:25,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:25,617 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:25,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:25,617 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:25,618 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:25,618 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:25,618 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] 
regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/A is initiating minor compaction (all files) 2024-11-20T15:22:25,618 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/B is initiating minor compaction (all files) 2024-11-20T15:22:25,618 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/A in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:25,618 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/B in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:25,618 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/853c5a73d6dc442f823675425a4f25f6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/2e82b92e4bf543dba7cbcceac91203e3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/9b989a85e28d4598882edb4e546427de] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=35.5 K 2024-11-20T15:22:25,618 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/9cd5f892e47640db8cfa737f3e269917, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/22f02b05372a481fb5e2cdf0425d3975, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/108f0a8a384e46f98b37cb6018a0fdcb] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=35.5 K 2024-11-20T15:22:25,619 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 853c5a73d6dc442f823675425a4f25f6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732116141883 2024-11-20T15:22:25,619 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cd5f892e47640db8cfa737f3e269917, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732116141883 2024-11-20T15:22:25,619 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e82b92e4bf543dba7cbcceac91203e3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732116143009 2024-11-20T15:22:25,619 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 22f02b05372a481fb5e2cdf0425d3975, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732116143009 
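The entries above show the exploring compaction policy taking all three eligible HFiles (roughly 12.0 K, 11.7 K and 11.9 K, 36393 bytes in total) for a minor compaction of stores A and B. Below is a simplified, self-contained sketch of the size-ratio test such a policy applies, not the actual HBase implementation; the class and method names, the split of the individual file sizes and the 1.2 ratio (the commonly used default for hbase.hstore.compaction.ratio) are illustrative assumptions.

    import java.util.List;

    public class CompactionRatioSketch {
        // A candidate set passes the ratio check when no single file is larger
        // than `ratio` times the combined size of the other files in the set.
        static boolean withinRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > ratio * (total - size)) {
                    return false; // one file dominates; this set is a poor compaction candidate
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Sizes roughly matching the three HFiles selected above (bytes).
            List<Long> sizes = List.of(12_291L, 11_951L, 12_151L);
            System.out.println(withinRatio(sizes, 1.2)); // true, so all three are compacted together
        }
    }

Because the three files are nearly the same size, the ratio test passes immediately, which is consistent with the "selected 3 files of size 36393 ... after considering 1 permutations with 1 in ratio" lines above.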
2024-11-20T15:22:25,620 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b989a85e28d4598882edb4e546427de, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732116144098 2024-11-20T15:22:25,620 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 108f0a8a384e46f98b37cb6018a0fdcb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732116144098 2024-11-20T15:22:25,629 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#B#compaction#255 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:25,630 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/313d31bf05aa450cad02f11a041263f8 is 50, key is test_row_0/B:col10/1732116144098/Put/seqid=0 2024-11-20T15:22:25,638 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#A#compaction#256 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:25,639 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/d27becdf76e14a9d8042508cd8157ff2 is 50, key is test_row_0/A:col10/1732116144098/Put/seqid=0 2024-11-20T15:22:25,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742126_1302 (size=12493) 2024-11-20T15:22:25,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742127_1303 (size=12493) 2024-11-20T15:22:25,658 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:25,658 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-20T15:22:25,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:25,659 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-20T15:22:25,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:25,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:25,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:25,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:25,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:25,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:25,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/bc6afc7556954058bf134a39493b24d6 is 50, key is test_row_0/A:col10/1732116144728/Put/seqid=0 2024-11-20T15:22:25,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742128_1304 (size=12151) 2024-11-20T15:22:25,668 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/bc6afc7556954058bf134a39493b24d6 2024-11-20T15:22:25,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/b0a3ff9d3494446c93e384797ab6d8fe is 50, key is test_row_0/B:col10/1732116144728/Put/seqid=0 2024-11-20T15:22:25,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742129_1305 (size=12151) 2024-11-20T15:22:25,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T15:22:25,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:25,846 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:25,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:25,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116205910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:26,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:26,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116206014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:26,049 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/313d31bf05aa450cad02f11a041263f8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/313d31bf05aa450cad02f11a041263f8 2024-11-20T15:22:26,060 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/B of 6e158976a217b84d83e0feeee3ad6faf into 313d31bf05aa450cad02f11a041263f8(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
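The repeated RegionTooBusyException warnings in this stretch mean incoming puts are being rejected while the region's memstore sits above its 512.0 K blocking limit; in HBase that limit is derived from hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, and the unusually small value suggests the test lowers the flush size on purpose. The HBase client normally absorbs this with its built-in retries, but the following minimal sketch makes the backoff explicit; the table, row, family and qualifier names match the log, while the retry count, sleep schedule and value payload are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);
                        break; // write accepted once the flush has drained the memstore
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 10) throw e;   // give up after 10 attempts (assumed)
                        Thread.sleep(100L * attempt); // back off while MemStoreFlusher.0 works
                    }
                }
            }
        }
    }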
2024-11-20T15:22:26,060 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:26,060 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/B, priority=13, startTime=1732116145617; duration=0sec 2024-11-20T15:22:26,060 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:26,060 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:B 2024-11-20T15:22:26,060 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:26,061 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/d27becdf76e14a9d8042508cd8157ff2 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d27becdf76e14a9d8042508cd8157ff2 2024-11-20T15:22:26,062 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:26,062 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/C is initiating minor compaction (all files) 2024-11-20T15:22:26,062 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/C in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:26,063 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/43598885ecd1487b9c1a1cd09373f7bb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a2275c827a5940ea9ecdc22d62c606e4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/22375681ecf24b4781eb19ad0722e75f] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=35.5 K 2024-11-20T15:22:26,063 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 43598885ecd1487b9c1a1cd09373f7bb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732116141883 2024-11-20T15:22:26,064 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting a2275c827a5940ea9ecdc22d62c606e4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732116143009 2024-11-20T15:22:26,065 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 22375681ecf24b4781eb19ad0722e75f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732116144098 2024-11-20T15:22:26,066 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/A of 6e158976a217b84d83e0feeee3ad6faf into d27becdf76e14a9d8042508cd8157ff2(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:26,066 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:26,066 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/A, priority=13, startTime=1732116145617; duration=0sec 2024-11-20T15:22:26,067 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:26,067 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:A 2024-11-20T15:22:26,075 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#C#compaction#259 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:26,075 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/fded501adc31491783bcab67191756c2 is 50, key is test_row_0/C:col10/1732116144098/Put/seqid=0 2024-11-20T15:22:26,082 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/b0a3ff9d3494446c93e384797ab6d8fe 2024-11-20T15:22:26,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742130_1306 (size=12493) 2024-11-20T15:22:26,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/1bdd3445e3a5441c9ee81690136b772c is 50, key is test_row_0/C:col10/1732116144728/Put/seqid=0 2024-11-20T15:22:26,102 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/fded501adc31491783bcab67191756c2 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/fded501adc31491783bcab67191756c2 2024-11-20T15:22:26,108 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/C of 6e158976a217b84d83e0feeee3ad6faf into fded501adc31491783bcab67191756c2(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
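Each of the three compactions above folds roughly 35.5 K of input (three ~12 K HFiles) into a single ~12.2 K output file. That shrinkage is expected for this workload: the writers keep overwriting the same rows and qualifiers, so most cells in the older HFiles are superseded versions that drop out when the files are merged. A minimal sketch of that overwrite pattern follows, with the loop bounds, row count and value payload assumed and the table, family and qualifier names taken from the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class OverwriteWorkloadSketch {
        // Repeatedly rewrite the same rows in families A, B and C; successive flushes
        // then produce HFiles full of superseded cells that compaction can collapse.
        static void run(Connection conn) throws Exception {
            try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                byte[][] families = { Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") };
                for (int round = 0; round < 100; round++) {  // number of rounds assumed
                    for (int row = 0; row < 10; row++) {     // row count assumed; the log shows keys like test_row_0
                        Put put = new Put(Bytes.toBytes("test_row_" + row));
                        byte[] value = Bytes.toBytes("round-" + round);
                        for (byte[] family : families) {
                            put.addColumn(family, Bytes.toBytes("col10"), value);
                        }
                        table.put(put);
                    }
                }
            }
        }
    }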
2024-11-20T15:22:26,108 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:26,108 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/C, priority=13, startTime=1732116145617; duration=0sec 2024-11-20T15:22:26,109 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:26,109 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:C 2024-11-20T15:22:26,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742131_1307 (size=12151) 2024-11-20T15:22:26,115 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/1bdd3445e3a5441c9ee81690136b772c 2024-11-20T15:22:26,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/bc6afc7556954058bf134a39493b24d6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/bc6afc7556954058bf134a39493b24d6 2024-11-20T15:22:26,132 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/bc6afc7556954058bf134a39493b24d6, entries=150, sequenceid=166, filesize=11.9 K 2024-11-20T15:22:26,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/b0a3ff9d3494446c93e384797ab6d8fe as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b0a3ff9d3494446c93e384797ab6d8fe 2024-11-20T15:22:26,140 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b0a3ff9d3494446c93e384797ab6d8fe, entries=150, sequenceid=166, filesize=11.9 K 2024-11-20T15:22:26,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/1bdd3445e3a5441c9ee81690136b772c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1bdd3445e3a5441c9ee81690136b772c 2024-11-20T15:22:26,152 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1bdd3445e3a5441c9ee81690136b772c, entries=150, sequenceid=166, filesize=11.9 K 2024-11-20T15:22:26,154 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 6e158976a217b84d83e0feeee3ad6faf in 495ms, sequenceid=166, compaction requested=false 2024-11-20T15:22:26,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:26,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:26,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-11-20T15:22:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-11-20T15:22:26,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-20T15:22:26,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 957 msec 2024-11-20T15:22:26,159 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 962 msec 2024-11-20T15:22:26,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T15:22:26,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:26,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:26,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:26,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:26,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:26,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
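At this point the flush requested through the master has run to completion: the per-region FlushRegionProcedure (pid=90) and its parent FlushTableProcedure (pid=89) both finish with state=SUCCESS just above. From client code such a table flush is driven through the Admin API; a minimal sketch, assuming an already-open Connection:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public class FlushTableSketch {
        // Ask the master to flush every region of the table; the call returns after
        // the flush table procedure and its per-region subprocedures have completed.
        static void flush(Connection conn) throws Exception {
            try (Admin admin = conn.getAdmin()) {
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }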
2024-11-20T15:22:26,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:26,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/cbd61c135fc94235af482703ceff0d87 is 50, key is test_row_0/A:col10/1732116146219/Put/seqid=0 2024-11-20T15:22:26,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742132_1308 (size=12151) 2024-11-20T15:22:26,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:26,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116206238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:26,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T15:22:26,301 INFO [Thread-1245 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-20T15:22:26,302 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:26,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-11-20T15:22:26,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 
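[Editor's note] The RegionTooBusyException above ("Over memstore limit=512.0 K") is HRegion.checkResources rejecting writes while the region's memstore exceeds its blocking size, which is the flush threshold multiplied by a block multiplier; the tiny 512 K limit suggests the test shrinks the flush size. A hedged sketch of the two configuration knobs involved, using the usual production defaults rather than the test's values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore size that triggers a flush (default 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore grows past
    // flush.size * block.multiplier (default multiplier 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = "
        + conf.getLong("hbase.hregion.memstore.flush.size", 0)
          * conf.getInt("hbase.hregion.memstore.block.multiplier", 0) + " bytes");
  }
}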
2024-11-20T15:22:26,304 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:26,304 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:26,305 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:26,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:26,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116206341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:26,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T15:22:26,456 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:26,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-20T15:22:26,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:26,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:26,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:26,457 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:26,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:26,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:26,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:26,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116206544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:26,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T15:22:26,609 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:26,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-20T15:22:26,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:26,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:26,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:26,610 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
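[Editor's note] The Mutate calls being rejected above are not permanent failures: RegionTooBusyException is retryable, so the stock HBase client backs off and retries the put until its operation timeout expires. A hedged sketch of the client-side retry knobs (property names are the standard client settings; the values are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class ClientRetryConfig {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 15);        // how many times to retry a retryable failure
    conf.setLong("hbase.client.pause", 100);                // base backoff between retries, in ms
    conf.setLong("hbase.client.operation.timeout", 120000); // overall cap per operation, in ms
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Puts issued through this table ride out transient RegionTooBusyException
      // rejections like the ones logged above, backing off between attempts.
    }
  }
}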
2024-11-20T15:22:26,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:26,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:26,635 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/cbd61c135fc94235af482703ceff0d87 2024-11-20T15:22:26,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/3f88e986172e45d6a02904c711e51c0a is 50, key is test_row_0/B:col10/1732116146219/Put/seqid=0 2024-11-20T15:22:26,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742133_1309 (size=12151) 2024-11-20T15:22:26,762 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:26,762 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-20T15:22:26,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:26,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:26,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:26,763 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
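[Editor's note] The HFile writer lines record cells keyed like test_row_0/A:col10 and test_row_0/B:col10, i.e. the workload writes the same row across column families A, B, and C under qualifier col10. A minimal sketch of such a multi-family put; a single Put is applied atomically to all families of the row, which is what the ACID guarantees test exercises. The payload value is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiFamilyPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      byte[] value = Bytes.toBytes("value-0"); // placeholder payload
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
      }
      table.put(put); // one atomic mutation across all three families of the row
    }
  }
}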
2024-11-20T15:22:26,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:26,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:26,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:26,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116206847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:26,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T15:22:26,918 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:26,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-20T15:22:26,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:26,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:26,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:26,919 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:26,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:26,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,049 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/3f88e986172e45d6a02904c711e51c0a 2024-11-20T15:22:27,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/9b3ae9d916f24d438b607e24c83f2dcf is 50, key is test_row_0/C:col10/1732116146219/Put/seqid=0 2024-11-20T15:22:27,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742134_1310 (size=12151) 2024-11-20T15:22:27,065 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/9b3ae9d916f24d438b607e24c83f2dcf 2024-11-20T15:22:27,071 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,073 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-20T15:22:27,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
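[Editor's note] The flusher reports bloomFilter=true (and the compactor later lists bloomtype=ROW for each file), while the CompactingMemStore "FLUSHING TO DISK" / "Swapping pipeline suffix" lines elsewhere in the log indicate in-memory compaction is enabled on the memstore. A hedged sketch of how those two column-family properties could be declared when creating such a table; the family name "A" comes from the log, but the BASIC policy is only an example since the log does not reveal which in-memory compaction policy is in use:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilySettings {
  public static void main(String[] args) {
    ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setBloomFilterType(BloomType.ROW)                   // matches bloomtype=ROW in the compactor lines
        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC) // backs the CompactingMemStore pipeline (policy assumed)
        .build();
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(cfA)
        .build();
    System.out.println(td);
  }
}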
2024-11-20T15:22:27,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:27,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:27,073 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:27,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:27,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/cbd61c135fc94235af482703ceff0d87 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/cbd61c135fc94235af482703ceff0d87 2024-11-20T15:22:27,085 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/cbd61c135fc94235af482703ceff0d87, entries=150, sequenceid=197, filesize=11.9 K 2024-11-20T15:22:27,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/3f88e986172e45d6a02904c711e51c0a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3f88e986172e45d6a02904c711e51c0a 2024-11-20T15:22:27,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3f88e986172e45d6a02904c711e51c0a, entries=150, sequenceid=197, filesize=11.9 K 2024-11-20T15:22:27,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/9b3ae9d916f24d438b607e24c83f2dcf as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/9b3ae9d916f24d438b607e24c83f2dcf 2024-11-20T15:22:27,098 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/9b3ae9d916f24d438b607e24c83f2dcf, entries=150, sequenceid=197, filesize=11.9 K 2024-11-20T15:22:27,099 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 6e158976a217b84d83e0feeee3ad6faf in 878ms, sequenceid=197, compaction requested=true 2024-11-20T15:22:27,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:27,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:27,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:27,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:27,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): 
Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:27,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:27,100 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:27,100 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:27,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:27,101 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:27,101 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/B is initiating minor compaction (all files) 2024-11-20T15:22:27,101 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/B in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:27,101 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/313d31bf05aa450cad02f11a041263f8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b0a3ff9d3494446c93e384797ab6d8fe, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3f88e986172e45d6a02904c711e51c0a] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=35.9 K 2024-11-20T15:22:27,101 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:27,101 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/A is initiating minor compaction (all files) 2024-11-20T15:22:27,101 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/A in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
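[Editor's note] The selection above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", with ExploringCompactionPolicy picking all 3 files) matches the stock minor-compaction thresholds: three eligible files is enough to trigger a minor compaction, and sixteen store files blocks further flushes. A hedged sketch of the usual tuning knobs, with the common defaults rather than values verified against this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // minimum eligible files before a minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);       // cap on files merged in one compaction
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by ExploringCompactionPolicy
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure in the log
    System.out.println("minor compaction after "
        + conf.getInt("hbase.hstore.compaction.min", 0) + " files");
  }
}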
2024-11-20T15:22:27,101 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d27becdf76e14a9d8042508cd8157ff2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/bc6afc7556954058bf134a39493b24d6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/cbd61c135fc94235af482703ceff0d87] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=35.9 K 2024-11-20T15:22:27,101 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 313d31bf05aa450cad02f11a041263f8, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732116144098 2024-11-20T15:22:27,102 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d27becdf76e14a9d8042508cd8157ff2, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732116144098 2024-11-20T15:22:27,102 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b0a3ff9d3494446c93e384797ab6d8fe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732116144722 2024-11-20T15:22:27,103 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc6afc7556954058bf134a39493b24d6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732116144722 2024-11-20T15:22:27,103 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f88e986172e45d6a02904c711e51c0a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732116145905 2024-11-20T15:22:27,103 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbd61c135fc94235af482703ceff0d87, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732116145905 2024-11-20T15:22:27,112 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#B#compaction#264 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:27,112 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#A#compaction#265 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:27,113 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/fe4c6b2bb31a4a6797cbb7d9aa7609ff is 50, key is test_row_0/B:col10/1732116146219/Put/seqid=0 2024-11-20T15:22:27,113 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/73f49dedbeac49d09120b08b91e3f545 is 50, key is test_row_0/A:col10/1732116146219/Put/seqid=0 2024-11-20T15:22:27,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742135_1311 (size=12595) 2024-11-20T15:22:27,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742136_1312 (size=12595) 2024-11-20T15:22:27,134 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/73f49dedbeac49d09120b08b91e3f545 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/73f49dedbeac49d09120b08b91e3f545 2024-11-20T15:22:27,139 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/A of 6e158976a217b84d83e0feeee3ad6faf into 73f49dedbeac49d09120b08b91e3f545(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
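[Editor's note] The PressureAwareThroughputController lines report per-compaction throughput against a "total limit is 50.00 MB/second", i.e. compaction I/O is being throttled. A hedged sketch of the throttling bounds the pressure-aware compaction throughput controller interpolates between; the property names are the standard ones, and the 50 MB/s figure mirrors the log but may be a test-specific setting:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Upper and lower bounds; the controller moves between them based on memstore/store-file pressure.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 25L * 1024 * 1024);
    System.out.println("compaction throughput capped at "
        + conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 0) + " bytes/sec");
  }
}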
2024-11-20T15:22:27,139 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:27,139 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/A, priority=13, startTime=1732116147099; duration=0sec 2024-11-20T15:22:27,139 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:27,139 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:A 2024-11-20T15:22:27,139 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:27,141 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:27,141 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/C is initiating minor compaction (all files) 2024-11-20T15:22:27,141 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/C in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:27,141 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/fded501adc31491783bcab67191756c2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1bdd3445e3a5441c9ee81690136b772c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/9b3ae9d916f24d438b607e24c83f2dcf] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=35.9 K 2024-11-20T15:22:27,141 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting fded501adc31491783bcab67191756c2, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732116144098 2024-11-20T15:22:27,141 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bdd3445e3a5441c9ee81690136b772c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732116144722 2024-11-20T15:22:27,142 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b3ae9d916f24d438b607e24c83f2dcf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732116145905 2024-11-20T15:22:27,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 
3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:22:27,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:27,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:27,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:27,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:27,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:27,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:27,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:27,149 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/146fef951ae54d63b5ce7f6fbd1dec99 is 50, key is test_row_0/A:col10/1732116147140/Put/seqid=0 2024-11-20T15:22:27,153 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#C#compaction#267 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:27,154 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/0b17f4a8a30f40d1a7d4695c8029ee34 is 50, key is test_row_0/C:col10/1732116146219/Put/seqid=0 2024-11-20T15:22:27,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742137_1313 (size=12151) 2024-11-20T15:22:27,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742138_1314 (size=12595) 2024-11-20T15:22:27,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116207174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116207174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116207176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116207177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,228 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,228 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-20T15:22:27,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:27,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:27,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:27,229 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:27,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116207278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116207278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116207280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116207280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116207351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,381 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,382 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-20T15:22:27,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:27,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:27,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:27,382 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:27,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T15:22:27,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116207480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116207482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116207484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116207484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,532 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/fe4c6b2bb31a4a6797cbb7d9aa7609ff as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/fe4c6b2bb31a4a6797cbb7d9aa7609ff 2024-11-20T15:22:27,534 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-20T15:22:27,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:27,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:27,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:27,535 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,541 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/B of 6e158976a217b84d83e0feeee3ad6faf into fe4c6b2bb31a4a6797cbb7d9aa7609ff(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:27,541 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:27,541 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/B, priority=13, startTime=1732116147099; duration=0sec 2024-11-20T15:22:27,541 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:27,541 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:B 2024-11-20T15:22:27,559 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/146fef951ae54d63b5ce7f6fbd1dec99 2024-11-20T15:22:27,566 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/0b17f4a8a30f40d1a7d4695c8029ee34 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/0b17f4a8a30f40d1a7d4695c8029ee34 2024-11-20T15:22:27,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/a8d6157658554820a9b43bdec60f1681 is 50, key is test_row_0/B:col10/1732116147140/Put/seqid=0 2024-11-20T15:22:27,571 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/C of 6e158976a217b84d83e0feeee3ad6faf into 0b17f4a8a30f40d1a7d4695c8029ee34(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:27,571 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:27,571 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/C, priority=13, startTime=1732116147100; duration=0sec 2024-11-20T15:22:27,571 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:27,571 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:C 2024-11-20T15:22:27,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742139_1315 (size=12151) 2024-11-20T15:22:27,687 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-20T15:22:27,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:27,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:27,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:27,688 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:27,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116207785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116207786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116207789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:27,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116207789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,840 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-20T15:22:27,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:27,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:27,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:27,841 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,975 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/a8d6157658554820a9b43bdec60f1681 2024-11-20T15:22:27,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/f6e2b3626e1046f19dba5a97dd25e9d2 is 50, key is test_row_0/C:col10/1732116147140/Put/seqid=0 2024-11-20T15:22:27,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742140_1316 (size=12151) 2024-11-20T15:22:27,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/f6e2b3626e1046f19dba5a97dd25e9d2 2024-11-20T15:22:27,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/146fef951ae54d63b5ce7f6fbd1dec99 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/146fef951ae54d63b5ce7f6fbd1dec99 2024-11-20T15:22:27,993 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:27,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-20T15:22:27,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:27,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:27,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:27,994 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,995 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/146fef951ae54d63b5ce7f6fbd1dec99, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T15:22:27,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:27,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/a8d6157658554820a9b43bdec60f1681 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/a8d6157658554820a9b43bdec60f1681 2024-11-20T15:22:28,000 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/a8d6157658554820a9b43bdec60f1681, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T15:22:28,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/f6e2b3626e1046f19dba5a97dd25e9d2 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/f6e2b3626e1046f19dba5a97dd25e9d2 2024-11-20T15:22:28,006 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/f6e2b3626e1046f19dba5a97dd25e9d2, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T15:22:28,007 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 6e158976a217b84d83e0feeee3ad6faf in 
864ms, sequenceid=209, compaction requested=false 2024-11-20T15:22:28,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:28,146 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,147 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-20T15:22:28,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:28,147 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T15:22:28,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:28,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:28,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:28,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:28,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:28,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:28,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/b35073f8fa2e493b89adda3c188355a2 is 50, key is test_row_0/A:col10/1732116147174/Put/seqid=0 2024-11-20T15:22:28,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742141_1317 (size=12151) 2024-11-20T15:22:28,164 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/b35073f8fa2e493b89adda3c188355a2 2024-11-20T15:22:28,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/872777e7133b459e87b2d8a0fa8ce56d is 50, key is test_row_0/B:col10/1732116147174/Put/seqid=0 2024-11-20T15:22:28,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742142_1318 (size=12151) 2024-11-20T15:22:28,195 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/872777e7133b459e87b2d8a0fa8ce56d 2024-11-20T15:22:28,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/eb533810fbd0441fad58b2b9a3777b63 is 50, key is test_row_0/C:col10/1732116147174/Put/seqid=0 2024-11-20T15:22:28,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742143_1319 (size=12151) 2024-11-20T15:22:28,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:28,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:28,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116208296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116208296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116208297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116208298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,365 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116208363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116208400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116208400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116208400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116208401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T15:22:28,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116208603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116208603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116208604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116208605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,637 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/eb533810fbd0441fad58b2b9a3777b63 2024-11-20T15:22:28,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/b35073f8fa2e493b89adda3c188355a2 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/b35073f8fa2e493b89adda3c188355a2 2024-11-20T15:22:28,648 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/b35073f8fa2e493b89adda3c188355a2, entries=150, sequenceid=237, filesize=11.9 K 2024-11-20T15:22:28,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/872777e7133b459e87b2d8a0fa8ce56d as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/872777e7133b459e87b2d8a0fa8ce56d 2024-11-20T15:22:28,656 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/872777e7133b459e87b2d8a0fa8ce56d, entries=150, sequenceid=237, filesize=11.9 K 2024-11-20T15:22:28,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/eb533810fbd0441fad58b2b9a3777b63 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/eb533810fbd0441fad58b2b9a3777b63 2024-11-20T15:22:28,669 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/eb533810fbd0441fad58b2b9a3777b63, entries=150, sequenceid=237, filesize=11.9 K 2024-11-20T15:22:28,671 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6e158976a217b84d83e0feeee3ad6faf in 524ms, sequenceid=237, compaction requested=true 2024-11-20T15:22:28,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:28,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:28,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-11-20T15:22:28,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-11-20T15:22:28,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-20T15:22:28,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3690 sec 2024-11-20T15:22:28,676 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 2.3730 sec 2024-11-20T15:22:28,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:28,911 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:22:28,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:28,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:28,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:28,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:28,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:28,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:28,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/54c38754b70349e7917de33e6016dd14 is 50, key is test_row_0/A:col10/1732116148296/Put/seqid=0 2024-11-20T15:22:28,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116208965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742144_1320 (size=14541) 2024-11-20T15:22:28,974 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/54c38754b70349e7917de33e6016dd14 2024-11-20T15:22:28,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116208968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116208972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:28,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116208976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:28,988 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/d8f953716cc44d35a8ed477e2329f946 is 50, key is test_row_0/B:col10/1732116148296/Put/seqid=0 2024-11-20T15:22:29,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742145_1321 (size=12151) 2024-11-20T15:22:29,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/d8f953716cc44d35a8ed477e2329f946 2024-11-20T15:22:29,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116209074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116209078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116209076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116209079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,097 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/d550fd6b6a174ea980d2f5d9b660ee72 is 50, key is test_row_0/C:col10/1732116148296/Put/seqid=0 2024-11-20T15:22:29,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742146_1322 (size=12151) 2024-11-20T15:22:29,145 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/d550fd6b6a174ea980d2f5d9b660ee72 2024-11-20T15:22:29,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/54c38754b70349e7917de33e6016dd14 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/54c38754b70349e7917de33e6016dd14 2024-11-20T15:22:29,156 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/54c38754b70349e7917de33e6016dd14, entries=200, sequenceid=248, filesize=14.2 K 2024-11-20T15:22:29,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/d8f953716cc44d35a8ed477e2329f946 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/d8f953716cc44d35a8ed477e2329f946 2024-11-20T15:22:29,176 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/d8f953716cc44d35a8ed477e2329f946, entries=150, sequenceid=248, filesize=11.9 K 2024-11-20T15:22:29,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/d550fd6b6a174ea980d2f5d9b660ee72 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/d550fd6b6a174ea980d2f5d9b660ee72 2024-11-20T15:22:29,204 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/d550fd6b6a174ea980d2f5d9b660ee72, entries=150, sequenceid=248, filesize=11.9 K 2024-11-20T15:22:29,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6e158976a217b84d83e0feeee3ad6faf in 295ms, sequenceid=248, compaction requested=true 2024-11-20T15:22:29,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:29,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:29,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:29,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:29,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:29,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:29,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T15:22:29,206 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:29,206 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:29,210 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:29,210 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/B is initiating minor compaction (all files) 2024-11-20T15:22:29,210 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/B in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
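The blocks of RegionTooBusyException warnings above are the region server rejecting writes because the region's memstore has crossed its blocking limit ("Over memstore limit=512.0 K") while flushes are still in progress. The exception is an IOException subtype, so the stock HBase client normally retries or surfaces it (possibly wrapped in its own retries-exhausted exception) according to the client retry settings. A hand-rolled back-off handler might look like the sketch below; the retry count, sleep, and helper names are illustrative, not anything taken from this test.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetry {
    /**
     * Attempts a put, backing off when the region reports it is over its
     * memstore limit. Assumes the raw RegionTooBusyException reaches the
     * caller; depending on client retry settings it may instead arrive
     * wrapped after the client's own retries.
     */
    static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        int attempts = 0;
        while (true) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {
                if (++attempts >= 5) {
                    throw e; // give up after a few tries (illustrative limit)
                }
                Thread.sleep(200L * attempts); // linear back-off before retrying
            }
        }
    }

    // Example put against the row and column family "A" seen in the log.
    static Put examplePut() {
        return new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    }
}
```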
2024-11-20T15:22:29,211 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/fe4c6b2bb31a4a6797cbb7d9aa7609ff, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/a8d6157658554820a9b43bdec60f1681, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/872777e7133b459e87b2d8a0fa8ce56d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/d8f953716cc44d35a8ed477e2329f946] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=47.9 K 2024-11-20T15:22:29,211 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51438 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:29,211 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/A is initiating minor compaction (all files) 2024-11-20T15:22:29,211 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/A in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:29,211 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/73f49dedbeac49d09120b08b91e3f545, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/146fef951ae54d63b5ce7f6fbd1dec99, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/b35073f8fa2e493b89adda3c188355a2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/54c38754b70349e7917de33e6016dd14] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=50.2 K 2024-11-20T15:22:29,211 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting fe4c6b2bb31a4a6797cbb7d9aa7609ff, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732116145905 2024-11-20T15:22:29,212 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73f49dedbeac49d09120b08b91e3f545, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732116145905 2024-11-20T15:22:29,212 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting a8d6157658554820a9b43bdec60f1681, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, 
earliestPutTs=1732116146236 2024-11-20T15:22:29,213 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 146fef951ae54d63b5ce7f6fbd1dec99, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732116146236 2024-11-20T15:22:29,214 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 872777e7133b459e87b2d8a0fa8ce56d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732116147173 2024-11-20T15:22:29,214 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting b35073f8fa2e493b89adda3c188355a2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732116147173 2024-11-20T15:22:29,214 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting d8f953716cc44d35a8ed477e2329f946, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732116148296 2024-11-20T15:22:29,214 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 54c38754b70349e7917de33e6016dd14, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732116148294 2024-11-20T15:22:29,239 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#A#compaction#276 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:29,240 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/a697347fe34749b3a35fea02e08b00f1 is 50, key is test_row_0/A:col10/1732116148296/Put/seqid=0 2024-11-20T15:22:29,255 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#B#compaction#277 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:29,255 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/2a0cc61538a44b7f9f6b456be5a829dc is 50, key is test_row_0/B:col10/1732116148296/Put/seqid=0 2024-11-20T15:22:29,284 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T15:22:29,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:29,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:29,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:29,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:29,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:29,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:29,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:29,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742147_1323 (size=12731) 2024-11-20T15:22:29,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116209301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116209303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,310 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/a697347fe34749b3a35fea02e08b00f1 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a697347fe34749b3a35fea02e08b00f1 2024-11-20T15:22:29,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116209307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116209308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742148_1324 (size=12731) 2024-11-20T15:22:29,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/18de11281ed4491197c258311e46b5e8 is 50, key is test_row_0/A:col10/1732116148970/Put/seqid=0 2024-11-20T15:22:29,316 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/A of 6e158976a217b84d83e0feeee3ad6faf into a697347fe34749b3a35fea02e08b00f1(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
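The 512 K blocking limit reported in these warnings is derived from the region memstore flush size times a block multiplier; the test evidently runs with a very small flush size, but the actual values are not visible in this log. The snippet below only illustrates the standard configuration keys involved, with placeholder values rather than the test's settings.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Flush a region's memstore once it reaches this many bytes
        // (placeholder value, not what this test used).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

        // Block new updates (RegionTooBusyException) once the memstore grows to
        // flush.size * this multiplier while flushes are still catching up.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        System.out.println("blocking limit = "
            + conf.getLong("hbase.hregion.memstore.flush.size", 0)
              * conf.getInt("hbase.hregion.memstore.block.multiplier", 0) + " bytes");
    }
}
```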
2024-11-20T15:22:29,316 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:29,316 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/A, priority=12, startTime=1732116149206; duration=0sec 2024-11-20T15:22:29,316 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:29,316 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:A 2024-11-20T15:22:29,316 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:29,318 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:29,318 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/C is initiating minor compaction (all files) 2024-11-20T15:22:29,318 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/C in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:29,318 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/0b17f4a8a30f40d1a7d4695c8029ee34, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/f6e2b3626e1046f19dba5a97dd25e9d2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/eb533810fbd0441fad58b2b9a3777b63, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/d550fd6b6a174ea980d2f5d9b660ee72] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=47.9 K 2024-11-20T15:22:29,318 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b17f4a8a30f40d1a7d4695c8029ee34, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732116145905 2024-11-20T15:22:29,319 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6e2b3626e1046f19dba5a97dd25e9d2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732116146236 2024-11-20T15:22:29,320 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb533810fbd0441fad58b2b9a3777b63, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732116147173 2024-11-20T15:22:29,321 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d550fd6b6a174ea980d2f5d9b660ee72, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732116148296 2024-11-20T15:22:29,321 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/2a0cc61538a44b7f9f6b456be5a829dc as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/2a0cc61538a44b7f9f6b456be5a829dc 2024-11-20T15:22:29,330 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/B of 6e158976a217b84d83e0feeee3ad6faf into 2a0cc61538a44b7f9f6b456be5a829dc(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:29,330 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:29,330 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/B, priority=12, startTime=1732116149206; duration=0sec 2024-11-20T15:22:29,330 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:29,330 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:B 2024-11-20T15:22:29,345 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#C#compaction#279 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:29,345 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/2e36ca9a30f0471a89411e5191fb7241 is 50, key is test_row_0/C:col10/1732116148296/Put/seqid=0 2024-11-20T15:22:29,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742149_1325 (size=14741) 2024-11-20T15:22:29,364 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/18de11281ed4491197c258311e46b5e8 2024-11-20T15:22:29,395 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/3e29441a10204f98b31e1c1b6b802e5e is 50, key is test_row_0/B:col10/1732116148970/Put/seqid=0 2024-11-20T15:22:29,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742150_1326 (size=12731) 2024-11-20T15:22:29,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116209409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116209415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116209409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116209421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742151_1327 (size=12301) 2024-11-20T15:22:29,423 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/2e36ca9a30f0471a89411e5191fb7241 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/2e36ca9a30f0471a89411e5191fb7241 2024-11-20T15:22:29,424 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/3e29441a10204f98b31e1c1b6b802e5e 2024-11-20T15:22:29,430 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/C of 6e158976a217b84d83e0feeee3ad6faf into 2e36ca9a30f0471a89411e5191fb7241(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
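At this point all three stores (A, B, C) of the region have been rewritten by system-requested minor compactions, each merging four store files into a single ~12.4 K file selected by the ExploringCompactionPolicy. For completeness, the same kind of compaction can also be requested explicitly through the Admin API; a minimal sketch assuming the test table and a column family named in the log (the file selection still happens server-side, exactly as in the entries above).

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
    public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Queue a compaction for one store (column family "A"); the server
            // decides which files to include, as the compaction policy did above.
            admin.compact(table, Bytes.toBytes("A"));

            // Or rewrite every store file in the table in one pass.
            admin.majorCompact(table);
        }
    }
}
```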
2024-11-20T15:22:29,430 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:29,430 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/C, priority=12, startTime=1732116149206; duration=0sec 2024-11-20T15:22:29,430 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:29,430 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:C 2024-11-20T15:22:29,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/c09d0c54dfe3455b90104a9b45bd961f is 50, key is test_row_0/C:col10/1732116148970/Put/seqid=0 2024-11-20T15:22:29,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742152_1328 (size=12301) 2024-11-20T15:22:29,467 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/c09d0c54dfe3455b90104a9b45bd961f 2024-11-20T15:22:29,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/18de11281ed4491197c258311e46b5e8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/18de11281ed4491197c258311e46b5e8 2024-11-20T15:22:29,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/18de11281ed4491197c258311e46b5e8, entries=200, sequenceid=274, filesize=14.4 K 2024-11-20T15:22:29,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/3e29441a10204f98b31e1c1b6b802e5e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3e29441a10204f98b31e1c1b6b802e5e 2024-11-20T15:22:29,487 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3e29441a10204f98b31e1c1b6b802e5e, entries=150, sequenceid=274, filesize=12.0 K 2024-11-20T15:22:29,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/c09d0c54dfe3455b90104a9b45bd961f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/c09d0c54dfe3455b90104a9b45bd961f 2024-11-20T15:22:29,494 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/c09d0c54dfe3455b90104a9b45bd961f, entries=150, sequenceid=274, filesize=12.0 K 2024-11-20T15:22:29,495 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6e158976a217b84d83e0feeee3ad6faf in 211ms, sequenceid=274, compaction requested=false 2024-11-20T15:22:29,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:29,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:29,620 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:22:29,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:29,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:29,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:29,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:29,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:29,621 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:29,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/65c8741d3fb14baf860d162e070d37b6 is 50, key is test_row_0/A:col10/1732116149617/Put/seqid=0 2024-11-20T15:22:29,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116209664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116209666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116209667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116209669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742153_1329 (size=12301) 2024-11-20T15:22:29,676 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/65c8741d3fb14baf860d162e070d37b6 2024-11-20T15:22:29,687 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/b6feb6cf74e24b34b1e00fe704744741 is 50, key is test_row_0/B:col10/1732116149617/Put/seqid=0 2024-11-20T15:22:29,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742154_1330 (size=12301) 2024-11-20T15:22:29,732 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/b6feb6cf74e24b34b1e00fe704744741 2024-11-20T15:22:29,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/54c8fcba1fbc4efab6a0cbd57db7836a is 50, key is test_row_0/C:col10/1732116149617/Put/seqid=0 2024-11-20T15:22:29,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116209770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,779 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116209774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,779 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:29,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116209774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116209775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:29,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742155_1331 (size=12301) 2024-11-20T15:22:29,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/54c8fcba1fbc4efab6a0cbd57db7836a 2024-11-20T15:22:29,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/65c8741d3fb14baf860d162e070d37b6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/65c8741d3fb14baf860d162e070d37b6 2024-11-20T15:22:29,815 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/65c8741d3fb14baf860d162e070d37b6, entries=150, sequenceid=288, filesize=12.0 K 2024-11-20T15:22:29,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/b6feb6cf74e24b34b1e00fe704744741 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b6feb6cf74e24b34b1e00fe704744741 2024-11-20T15:22:29,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b6feb6cf74e24b34b1e00fe704744741, entries=150, sequenceid=288, filesize=12.0 K 2024-11-20T15:22:29,831 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/54c8fcba1fbc4efab6a0cbd57db7836a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/54c8fcba1fbc4efab6a0cbd57db7836a 2024-11-20T15:22:29,837 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/54c8fcba1fbc4efab6a0cbd57db7836a, entries=150, sequenceid=288, filesize=12.0 K 2024-11-20T15:22:29,838 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6e158976a217b84d83e0feeee3ad6faf in 219ms, sequenceid=288, compaction requested=true 2024-11-20T15:22:29,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:29,838 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:29,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:29,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:29,840 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39773 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:29,840 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/A is initiating minor compaction (all files) 2024-11-20T15:22:29,840 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/A in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:29,840 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a697347fe34749b3a35fea02e08b00f1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/18de11281ed4491197c258311e46b5e8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/65c8741d3fb14baf860d162e070d37b6] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=38.8 K 2024-11-20T15:22:29,841 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting a697347fe34749b3a35fea02e08b00f1, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732116148296 2024-11-20T15:22:29,841 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18de11281ed4491197c258311e46b5e8, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732116148959 2024-11-20T15:22:29,841 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:29,842 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 65c8741d3fb14baf860d162e070d37b6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732116149290 2024-11-20T15:22:29,843 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:29,843 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/B is initiating minor compaction (all files) 2024-11-20T15:22:29,844 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/B in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:29,844 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/2a0cc61538a44b7f9f6b456be5a829dc, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3e29441a10204f98b31e1c1b6b802e5e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b6feb6cf74e24b34b1e00fe704744741] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=36.5 K 2024-11-20T15:22:29,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:29,844 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a0cc61538a44b7f9f6b456be5a829dc, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732116148296 2024-11-20T15:22:29,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:29,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:29,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:29,845 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e29441a10204f98b31e1c1b6b802e5e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732116148965 2024-11-20T15:22:29,845 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b6feb6cf74e24b34b1e00fe704744741, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732116149290 2024-11-20T15:22:29,889 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#A#compaction#285 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:29,889 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/b48b4a6982354a3da9a0be8f55648275 is 50, key is test_row_0/A:col10/1732116149617/Put/seqid=0 2024-11-20T15:22:29,894 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#B#compaction#286 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:29,894 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/1a358e55823d4394a3198f77dd6a8c0f is 50, key is test_row_0/B:col10/1732116149617/Put/seqid=0 2024-11-20T15:22:29,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742156_1332 (size=12983) 2024-11-20T15:22:29,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742157_1333 (size=12983) 2024-11-20T15:22:29,980 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/1a358e55823d4394a3198f77dd6a8c0f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/1a358e55823d4394a3198f77dd6a8c0f 2024-11-20T15:22:29,986 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T15:22:29,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:29,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:29,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:29,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:29,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:29,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:29,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:29,995 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/B of 6e158976a217b84d83e0feeee3ad6faf into 1a358e55823d4394a3198f77dd6a8c0f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:29,995 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:29,995 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/B, priority=13, startTime=1732116149841; duration=0sec 2024-11-20T15:22:29,995 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:29,995 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:B 2024-11-20T15:22:29,995 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:29,998 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:29,998 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/C is initiating minor compaction (all files) 2024-11-20T15:22:29,998 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/C in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:29,998 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/2e36ca9a30f0471a89411e5191fb7241, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/c09d0c54dfe3455b90104a9b45bd961f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/54c8fcba1fbc4efab6a0cbd57db7836a] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=36.5 K 2024-11-20T15:22:29,999 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e36ca9a30f0471a89411e5191fb7241, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732116148296 2024-11-20T15:22:29,999 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting c09d0c54dfe3455b90104a9b45bd961f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732116148965 2024-11-20T15:22:30,000 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 54c8fcba1fbc4efab6a0cbd57db7836a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732116149290 2024-11-20T15:22:30,002 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/a9f3638f745346d3b7304eefcbf07709 is 50, key is test_row_0/A:col10/1732116149985/Put/seqid=0 2024-11-20T15:22:30,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116210007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116210007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116210011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116210017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,050 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#C#compaction#288 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:30,051 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/98ea6dfc457e4df99e54550ea6153c1a is 50, key is test_row_0/C:col10/1732116149617/Put/seqid=0 2024-11-20T15:22:30,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742158_1334 (size=12301) 2024-11-20T15:22:30,091 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/a9f3638f745346d3b7304eefcbf07709 2024-11-20T15:22:30,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116210115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116210116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116210117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116210122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,125 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/6c318c266e364b039ed59b2c57e9642e is 50, key is test_row_0/B:col10/1732116149985/Put/seqid=0 2024-11-20T15:22:30,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742159_1335 (size=12983) 2024-11-20T15:22:30,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742160_1336 (size=12301) 2024-11-20T15:22:30,158 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/6c318c266e364b039ed59b2c57e9642e 2024-11-20T15:22:30,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/81ce4d93fa2b4e0480c7bfe121654419 is 50, key is test_row_0/C:col10/1732116149985/Put/seqid=0 2024-11-20T15:22:30,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742161_1337 (size=12301) 2024-11-20T15:22:30,253 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/81ce4d93fa2b4e0480c7bfe121654419 2024-11-20T15:22:30,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/a9f3638f745346d3b7304eefcbf07709 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a9f3638f745346d3b7304eefcbf07709 2024-11-20T15:22:30,267 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a9f3638f745346d3b7304eefcbf07709, entries=150, sequenceid=315, filesize=12.0 K 2024-11-20T15:22:30,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/6c318c266e364b039ed59b2c57e9642e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/6c318c266e364b039ed59b2c57e9642e 2024-11-20T15:22:30,274 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/6c318c266e364b039ed59b2c57e9642e, entries=150, sequenceid=315, filesize=12.0 K 2024-11-20T15:22:30,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/81ce4d93fa2b4e0480c7bfe121654419 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/81ce4d93fa2b4e0480c7bfe121654419 2024-11-20T15:22:30,284 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/81ce4d93fa2b4e0480c7bfe121654419, entries=150, sequenceid=315, filesize=12.0 K 2024-11-20T15:22:30,285 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 6e158976a217b84d83e0feeee3ad6faf in 299ms, sequenceid=315, compaction requested=false 2024-11-20T15:22:30,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:30,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:30,330 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T15:22:30,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:30,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:30,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:30,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:30,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:30,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:30,342 DEBUG 
[RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/b48b4a6982354a3da9a0be8f55648275 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/b48b4a6982354a3da9a0be8f55648275 2024-11-20T15:22:30,349 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/A of 6e158976a217b84d83e0feeee3ad6faf into b48b4a6982354a3da9a0be8f55648275(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:30,349 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:30,349 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/A, priority=13, startTime=1732116149838; duration=0sec 2024-11-20T15:22:30,349 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:30,349 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:A 2024-11-20T15:22:30,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/d7ae2b4aced84d398049b4753452c10c is 50, key is test_row_0/A:col10/1732116150325/Put/seqid=0 2024-11-20T15:22:30,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116210377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116210378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116210379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116210381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116210381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T15:22:30,408 INFO [Thread-1245 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-20T15:22:30,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:30,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees 2024-11-20T15:22:30,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-20T15:22:30,412 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:30,413 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:30,413 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:30,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to 
blk_1073742162_1338 (size=12301) 2024-11-20T15:22:30,427 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/d7ae2b4aced84d398049b4753452c10c 2024-11-20T15:22:30,453 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/3db9db2db0274215a0539e4a9ca3c69c is 50, key is test_row_0/B:col10/1732116150325/Put/seqid=0 2024-11-20T15:22:30,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116210483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116210488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116210491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116210492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116210493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742163_1339 (size=12301) 2024-11-20T15:22:30,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-20T15:22:30,535 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/98ea6dfc457e4df99e54550ea6153c1a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/98ea6dfc457e4df99e54550ea6153c1a 2024-11-20T15:22:30,545 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/C of 6e158976a217b84d83e0feeee3ad6faf into 98ea6dfc457e4df99e54550ea6153c1a(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:30,545 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:30,545 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/C, priority=13, startTime=1732116149844; duration=0sec 2024-11-20T15:22:30,545 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:30,545 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:C 2024-11-20T15:22:30,564 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,565 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-20T15:22:30,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:30,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:30,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:30,565 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:30,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:30,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:30,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116210689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116210693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116210699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,703 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116210701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116210702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-20T15:22:30,724 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-20T15:22:30,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:30,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:30,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:30,726 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:30,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:30,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:30,878 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:30,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-20T15:22:30,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:30,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:30,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:30,879 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:30,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:30,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:30,901 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/3db9db2db0274215a0539e4a9ca3c69c 2024-11-20T15:22:30,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/0e2d34b919ee4df2b1a7f5af9865b93f is 50, key is test_row_0/C:col10/1732116150325/Put/seqid=0 2024-11-20T15:22:30,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742164_1340 (size=12301) 2024-11-20T15:22:30,955 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/0e2d34b919ee4df2b1a7f5af9865b93f 2024-11-20T15:22:30,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/d7ae2b4aced84d398049b4753452c10c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d7ae2b4aced84d398049b4753452c10c 2024-11-20T15:22:30,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d7ae2b4aced84d398049b4753452c10c, entries=150, sequenceid=329, filesize=12.0 K 2024-11-20T15:22:30,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/3db9db2db0274215a0539e4a9ca3c69c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3db9db2db0274215a0539e4a9ca3c69c 2024-11-20T15:22:30,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3db9db2db0274215a0539e4a9ca3c69c, entries=150, sequenceid=329, filesize=12.0 K 2024-11-20T15:22:30,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/0e2d34b919ee4df2b1a7f5af9865b93f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/0e2d34b919ee4df2b1a7f5af9865b93f 2024-11-20T15:22:30,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:30,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116210994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,000 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/0e2d34b919ee4df2b1a7f5af9865b93f, entries=150, sequenceid=329, filesize=12.0 K 2024-11-20T15:22:31,001 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 6e158976a217b84d83e0feeee3ad6faf in 671ms, sequenceid=329, compaction requested=true 2024-11-20T15:22:31,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:31,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:31,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:31,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:31,002 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:31,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:31,002 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:31,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T15:22:31,002 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:31,003 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:31,003 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/B is initiating minor compaction (all files) 2024-11-20T15:22:31,003 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/B in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:31,003 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/1a358e55823d4394a3198f77dd6a8c0f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/6c318c266e364b039ed59b2c57e9642e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3db9db2db0274215a0539e4a9ca3c69c] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=36.7 K 2024-11-20T15:22:31,003 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:31,004 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/A is initiating minor compaction (all files) 2024-11-20T15:22:31,004 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/A in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:31,004 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/b48b4a6982354a3da9a0be8f55648275, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a9f3638f745346d3b7304eefcbf07709, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d7ae2b4aced84d398049b4753452c10c] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=36.7 K 2024-11-20T15:22:31,004 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a358e55823d4394a3198f77dd6a8c0f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732116149290 2024-11-20T15:22:31,004 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b48b4a6982354a3da9a0be8f55648275, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732116149290 2024-11-20T15:22:31,005 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c318c266e364b039ed59b2c57e9642e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732116149660 2024-11-20T15:22:31,005 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting a9f3638f745346d3b7304eefcbf07709, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732116149660 2024-11-20T15:22:31,005 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3db9db2db0274215a0539e4a9ca3c69c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732116150324 2024-11-20T15:22:31,006 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting d7ae2b4aced84d398049b4753452c10c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732116150324 2024-11-20T15:22:31,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:31,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T15:22:31,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:31,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:31,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:31,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:31,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 
2024-11-20T15:22:31,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:31,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-20T15:22:31,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/e45fd3038fdd4cf482b3c4b60048bd03 is 50, key is test_row_0/A:col10/1732116150379/Put/seqid=0 2024-11-20T15:22:31,023 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#B#compaction#295 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:31,023 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/af9d113f02604b868df975653d4aa7d6 is 50, key is test_row_0/B:col10/1732116150325/Put/seqid=0 2024-11-20T15:22:31,026 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#A#compaction#296 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:31,027 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/55509dd133ff4d22885b023716baa940 is 50, key is test_row_0/A:col10/1732116150325/Put/seqid=0 2024-11-20T15:22:31,033 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,038 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-20T15:22:31,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:31,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:31,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:31,038 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:31,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:31,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:31,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116211033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116211040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116211041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116211049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742165_1341 (size=13085) 2024-11-20T15:22:31,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742166_1342 (size=12301) 2024-11-20T15:22:31,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742167_1343 (size=13085) 2024-11-20T15:22:31,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116211142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116211148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116211149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116211153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,190 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,191 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-20T15:22:31,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:31,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:31,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:31,191 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:31,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:31,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:31,343 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,344 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-20T15:22:31,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:31,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:31,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:31,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:31,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:31,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:31,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116211349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116211357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116211357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116211360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,469 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/e45fd3038fdd4cf482b3c4b60048bd03 2024-11-20T15:22:31,471 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/55509dd133ff4d22885b023716baa940 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/55509dd133ff4d22885b023716baa940 2024-11-20T15:22:31,479 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/A of 6e158976a217b84d83e0feeee3ad6faf into 55509dd133ff4d22885b023716baa940(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:31,479 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:31,479 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/A, priority=13, startTime=1732116151001; duration=0sec 2024-11-20T15:22:31,479 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:31,479 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:A 2024-11-20T15:22:31,479 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:31,480 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:31,481 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/C is initiating minor compaction (all files) 2024-11-20T15:22:31,481 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/C in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:31,481 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/98ea6dfc457e4df99e54550ea6153c1a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/81ce4d93fa2b4e0480c7bfe121654419, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/0e2d34b919ee4df2b1a7f5af9865b93f] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=36.7 K 2024-11-20T15:22:31,481 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 98ea6dfc457e4df99e54550ea6153c1a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732116149290 2024-11-20T15:22:31,482 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 81ce4d93fa2b4e0480c7bfe121654419, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732116149660 2024-11-20T15:22:31,482 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e2d34b919ee4df2b1a7f5af9865b93f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732116150324 2024-11-20T15:22:31,484 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/ceb93200d0214bf6916e22e641901449 is 50, key is test_row_0/B:col10/1732116150379/Put/seqid=0 2024-11-20T15:22:31,492 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#C#compaction#298 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:31,493 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/9b4fe192dd91412f991cfd055d9ed2dd is 50, key is test_row_0/C:col10/1732116150325/Put/seqid=0 2024-11-20T15:22:31,499 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-20T15:22:31,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:31,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:31,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:31,500 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:31,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:31,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:31,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116211498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-20T15:22:31,516 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/af9d113f02604b868df975653d4aa7d6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/af9d113f02604b868df975653d4aa7d6 2024-11-20T15:22:31,521 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/B of 6e158976a217b84d83e0feeee3ad6faf into af9d113f02604b868df975653d4aa7d6(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:31,521 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:31,521 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/B, priority=13, startTime=1732116151002; duration=0sec 2024-11-20T15:22:31,521 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:31,521 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:B 2024-11-20T15:22:31,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742168_1344 (size=13085) 2024-11-20T15:22:31,560 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/9b4fe192dd91412f991cfd055d9ed2dd as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/9b4fe192dd91412f991cfd055d9ed2dd 2024-11-20T15:22:31,565 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/C of 6e158976a217b84d83e0feeee3ad6faf into 9b4fe192dd91412f991cfd055d9ed2dd(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:31,566 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:31,566 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/C, priority=13, startTime=1732116151002; duration=0sec 2024-11-20T15:22:31,566 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:31,566 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:C 2024-11-20T15:22:31,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742169_1345 (size=12301) 2024-11-20T15:22:31,572 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/ceb93200d0214bf6916e22e641901449 2024-11-20T15:22:31,593 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/a259f6a896ab45a48a1446f3202f56ad is 50, key is test_row_0/C:col10/1732116150379/Put/seqid=0 2024-11-20T15:22:31,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742170_1346 (size=12301) 2024-11-20T15:22:31,642 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/a259f6a896ab45a48a1446f3202f56ad 2024-11-20T15:22:31,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/e45fd3038fdd4cf482b3c4b60048bd03 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/e45fd3038fdd4cf482b3c4b60048bd03 2024-11-20T15:22:31,652 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-20T15:22:31,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:31,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:31,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:31,653 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:31,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:31,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:31,654 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/e45fd3038fdd4cf482b3c4b60048bd03, entries=150, sequenceid=356, filesize=12.0 K 2024-11-20T15:22:31,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/ceb93200d0214bf6916e22e641901449 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/ceb93200d0214bf6916e22e641901449 2024-11-20T15:22:31,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116211654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116211663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,669 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/ceb93200d0214bf6916e22e641901449, entries=150, sequenceid=356, filesize=12.0 K 2024-11-20T15:22:31,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116211665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/a259f6a896ab45a48a1446f3202f56ad as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a259f6a896ab45a48a1446f3202f56ad 2024-11-20T15:22:31,674 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:31,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116211670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,676 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a259f6a896ab45a48a1446f3202f56ad, entries=150, sequenceid=356, filesize=12.0 K 2024-11-20T15:22:31,677 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 6e158976a217b84d83e0feeee3ad6faf in 667ms, sequenceid=356, compaction requested=false 2024-11-20T15:22:31,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:31,810 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:31,810 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-20T15:22:31,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:31,811 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T15:22:31,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:31,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:31,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:31,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:31,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:31,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:31,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/0145124deeaa48bdb2b9749fc5cca07b is 50, key is test_row_0/A:col10/1732116151035/Put/seqid=0 2024-11-20T15:22:31,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742171_1347 (size=12301) 2024-11-20T15:22:31,877 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/0145124deeaa48bdb2b9749fc5cca07b 2024-11-20T15:22:31,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/de4d5b0a177a4648bc859e9897b5139f is 50, key is test_row_0/B:col10/1732116151035/Put/seqid=0 2024-11-20T15:22:31,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742172_1348 (size=12301) 2024-11-20T15:22:31,940 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=372 (bloomFilter=true), 
to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/de4d5b0a177a4648bc859e9897b5139f 2024-11-20T15:22:31,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/92c2e2eb7ebc4071918a584bdae88f06 is 50, key is test_row_0/C:col10/1732116151035/Put/seqid=0 2024-11-20T15:22:31,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742173_1349 (size=12301) 2024-11-20T15:22:31,985 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/92c2e2eb7ebc4071918a584bdae88f06 2024-11-20T15:22:31,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/0145124deeaa48bdb2b9749fc5cca07b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/0145124deeaa48bdb2b9749fc5cca07b 2024-11-20T15:22:31,999 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/0145124deeaa48bdb2b9749fc5cca07b, entries=150, sequenceid=372, filesize=12.0 K 2024-11-20T15:22:32,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/de4d5b0a177a4648bc859e9897b5139f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/de4d5b0a177a4648bc859e9897b5139f 2024-11-20T15:22:32,005 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/de4d5b0a177a4648bc859e9897b5139f, entries=150, sequenceid=372, filesize=12.0 K 2024-11-20T15:22:32,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/92c2e2eb7ebc4071918a584bdae88f06 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/92c2e2eb7ebc4071918a584bdae88f06 2024-11-20T15:22:32,013 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/92c2e2eb7ebc4071918a584bdae88f06, entries=150, sequenceid=372, filesize=12.0 K 2024-11-20T15:22:32,014 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for 6e158976a217b84d83e0feeee3ad6faf in 203ms, sequenceid=372, compaction requested=true 2024-11-20T15:22:32,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:32,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:32,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=94 2024-11-20T15:22:32,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=94 2024-11-20T15:22:32,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-20T15:22:32,021 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6030 sec 2024-11-20T15:22:32,023 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees in 1.6110 sec 2024-11-20T15:22:32,225 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T15:22:32,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:32,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:32,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:32,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:32,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:32,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:32,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:32,242 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/339c6c485cda44c1b6ff2a8397f471b3 is 50, key is test_row_0/A:col10/1732116152224/Put/seqid=0 2024-11-20T15:22:32,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742174_1350 (size=14741) 2024-11-20T15:22:32,263 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/339c6c485cda44c1b6ff2a8397f471b3 2024-11-20T15:22:32,290 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/37a160ded8d54d97852bd1a60c5d02a4 is 50, key is test_row_0/B:col10/1732116152224/Put/seqid=0 2024-11-20T15:22:32,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116212334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116212334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116212335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116212339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742175_1351 (size=12301) 2024-11-20T15:22:32,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116212441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116212443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116212441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116212443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116212508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-20T15:22:32,517 INFO [Thread-1245 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-11-20T15:22:32,519 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:32,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees 2024-11-20T15:22:32,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T15:22:32,524 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=95, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:32,525 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=95, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:32,525 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:32,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=95 2024-11-20T15:22:32,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116212648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116212648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116212649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116212648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,678 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,679 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=96 2024-11-20T15:22:32,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:32,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:32,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:32,679 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=96}] handler.RSProcedureHandler(58): pid=96 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:32,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=96 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:32,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=96 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:32,743 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/37a160ded8d54d97852bd1a60c5d02a4 2024-11-20T15:22:32,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/3643962304bf4dc697dc1923d2c7200d is 50, key is test_row_0/C:col10/1732116152224/Put/seqid=0 2024-11-20T15:22:32,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742176_1352 (size=12301) 2024-11-20T15:22:32,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T15:22:32,832 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=96 2024-11-20T15:22:32,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:32,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:32,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:32,836 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] handler.RSProcedureHandler(58): pid=96 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:32,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=96 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:32,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=96 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:32,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116212951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116212952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116212955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:32,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116212964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,989 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:32,990 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=96 2024-11-20T15:22:32,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:32,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:32,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:32,990 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] handler.RSProcedureHandler(58): pid=96 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:32,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=96 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:32,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=96 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:33,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T15:22:33,143 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:33,143 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=96 2024-11-20T15:22:33,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:33,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:33,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:33,144 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=96}] handler.RSProcedureHandler(58): pid=96 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:33,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=96 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:33,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=96 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:33,191 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=385 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/3643962304bf4dc697dc1923d2c7200d 2024-11-20T15:22:33,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/339c6c485cda44c1b6ff2a8397f471b3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/339c6c485cda44c1b6ff2a8397f471b3 2024-11-20T15:22:33,200 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/339c6c485cda44c1b6ff2a8397f471b3, entries=200, sequenceid=385, filesize=14.4 K 2024-11-20T15:22:33,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/37a160ded8d54d97852bd1a60c5d02a4 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/37a160ded8d54d97852bd1a60c5d02a4 2024-11-20T15:22:33,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/37a160ded8d54d97852bd1a60c5d02a4, entries=150, sequenceid=385, filesize=12.0 K 2024-11-20T15:22:33,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/3643962304bf4dc697dc1923d2c7200d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/3643962304bf4dc697dc1923d2c7200d 2024-11-20T15:22:33,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/3643962304bf4dc697dc1923d2c7200d, entries=150, sequenceid=385, filesize=12.0 K 2024-11-20T15:22:33,212 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 6e158976a217b84d83e0feeee3ad6faf in 987ms, sequenceid=385, compaction requested=true 2024-11-20T15:22:33,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:33,213 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:33,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:A, priority=-2147483648, 
current under compaction store size is 1 2024-11-20T15:22:33,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:33,213 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:33,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:33,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:33,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:33,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:33,214 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52428 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:33,214 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/A is initiating minor compaction (all files) 2024-11-20T15:22:33,214 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/A in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:33,214 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/55509dd133ff4d22885b023716baa940, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/e45fd3038fdd4cf482b3c4b60048bd03, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/0145124deeaa48bdb2b9749fc5cca07b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/339c6c485cda44c1b6ff2a8397f471b3] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=51.2 K 2024-11-20T15:22:33,215 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:33,215 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/B is initiating minor compaction (all files) 2024-11-20T15:22:33,215 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/B in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:33,215 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/af9d113f02604b868df975653d4aa7d6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/ceb93200d0214bf6916e22e641901449, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/de4d5b0a177a4648bc859e9897b5139f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/37a160ded8d54d97852bd1a60c5d02a4] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=48.8 K 2024-11-20T15:22:33,220 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55509dd133ff4d22885b023716baa940, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732116150324 2024-11-20T15:22:33,220 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting af9d113f02604b868df975653d4aa7d6, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732116150324 2024-11-20T15:22:33,220 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting e45fd3038fdd4cf482b3c4b60048bd03, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, 
earliestPutTs=1732116150377 2024-11-20T15:22:33,220 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting ceb93200d0214bf6916e22e641901449, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732116150377 2024-11-20T15:22:33,221 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0145124deeaa48bdb2b9749fc5cca07b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732116151017 2024-11-20T15:22:33,221 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting de4d5b0a177a4648bc859e9897b5139f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732116151017 2024-11-20T15:22:33,221 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 339c6c485cda44c1b6ff2a8397f471b3, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1732116152204 2024-11-20T15:22:33,221 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 37a160ded8d54d97852bd1a60c5d02a4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1732116152204 2024-11-20T15:22:33,240 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#A#compaction#306 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:33,241 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/1753df2ece4f4f4288ced5eb34838475 is 50, key is test_row_0/A:col10/1732116152224/Put/seqid=0 2024-11-20T15:22:33,255 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#B#compaction#307 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:33,256 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/be147702d8ea46ea8535b68ad37a7a8e is 50, key is test_row_0/B:col10/1732116152224/Put/seqid=0 2024-11-20T15:22:33,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742177_1353 (size=13221) 2024-11-20T15:22:33,306 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:33,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=96 2024-11-20T15:22:33,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:33,307 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T15:22:33,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:33,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:33,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:33,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:33,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:33,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:33,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742178_1354 (size=13221) 2024-11-20T15:22:33,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/ad4798377630477fbc57230ce8353f82 is 50, key is test_row_0/A:col10/1732116152321/Put/seqid=0 2024-11-20T15:22:33,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742179_1355 
(size=12301) 2024-11-20T15:22:33,382 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/ad4798377630477fbc57230ce8353f82 2024-11-20T15:22:33,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/773a95e02580483bba88460af8d367c7 is 50, key is test_row_0/B:col10/1732116152321/Put/seqid=0 2024-11-20T15:22:33,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742180_1356 (size=12301) 2024-11-20T15:22:33,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:33,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:33,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:33,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116213491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:33,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:33,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116213494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:33,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:33,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116213494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:33,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:33,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116213497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:33,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:33,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116213604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:33,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:33,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116213604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:33,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:33,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116213607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:33,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:33,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116213607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:33,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T15:22:33,714 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/1753df2ece4f4f4288ced5eb34838475 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/1753df2ece4f4f4288ced5eb34838475 2024-11-20T15:22:33,720 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/A of 6e158976a217b84d83e0feeee3ad6faf into 1753df2ece4f4f4288ced5eb34838475(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:33,720 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:33,720 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/A, priority=12, startTime=1732116153213; duration=0sec 2024-11-20T15:22:33,720 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:33,720 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:A 2024-11-20T15:22:33,720 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:33,723 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:33,723 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/C is initiating minor compaction (all files) 2024-11-20T15:22:33,724 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/C in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:33,724 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/9b4fe192dd91412f991cfd055d9ed2dd, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a259f6a896ab45a48a1446f3202f56ad, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/92c2e2eb7ebc4071918a584bdae88f06, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/3643962304bf4dc697dc1923d2c7200d] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=48.8 K 2024-11-20T15:22:33,724 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b4fe192dd91412f991cfd055d9ed2dd, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732116150324 2024-11-20T15:22:33,725 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting a259f6a896ab45a48a1446f3202f56ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732116150377 2024-11-20T15:22:33,725 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92c2e2eb7ebc4071918a584bdae88f06, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732116151017 2024-11-20T15:22:33,725 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3643962304bf4dc697dc1923d2c7200d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1732116152204 2024-11-20T15:22:33,727 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/be147702d8ea46ea8535b68ad37a7a8e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/be147702d8ea46ea8535b68ad37a7a8e 2024-11-20T15:22:33,733 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/B of 6e158976a217b84d83e0feeee3ad6faf into be147702d8ea46ea8535b68ad37a7a8e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:33,733 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:33,733 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/B, priority=12, startTime=1732116153213; duration=0sec 2024-11-20T15:22:33,733 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:33,733 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:B 2024-11-20T15:22:33,750 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#C#compaction#310 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:33,751 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/0d0cf5814a3b4ee3859045d635866a40 is 50, key is test_row_0/C:col10/1732116152224/Put/seqid=0 2024-11-20T15:22:33,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742181_1357 (size=13221) 2024-11-20T15:22:33,802 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/0d0cf5814a3b4ee3859045d635866a40 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/0d0cf5814a3b4ee3859045d635866a40 2024-11-20T15:22:33,807 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/C of 6e158976a217b84d83e0feeee3ad6faf into 0d0cf5814a3b4ee3859045d635866a40(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:33,807 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:33,807 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/C, priority=12, startTime=1732116153213; duration=0sec 2024-11-20T15:22:33,807 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:33,807 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:C 2024-11-20T15:22:33,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:33,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116213810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:33,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:33,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116213810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:33,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:33,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116213811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:33,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:33,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116213811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:33,840 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/773a95e02580483bba88460af8d367c7 2024-11-20T15:22:33,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/1e0545aa6ae249fd8d46f79f360f7b8c is 50, key is test_row_0/C:col10/1732116152321/Put/seqid=0 2024-11-20T15:22:33,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742182_1358 (size=12301) 2024-11-20T15:22:34,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:34,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116214122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:34,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:34,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116214123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:34,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:34,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116214125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:34,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:34,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116214125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:34,331 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/1e0545aa6ae249fd8d46f79f360f7b8c 2024-11-20T15:22:34,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/ad4798377630477fbc57230ce8353f82 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/ad4798377630477fbc57230ce8353f82 2024-11-20T15:22:34,346 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/ad4798377630477fbc57230ce8353f82, entries=150, sequenceid=408, filesize=12.0 K 2024-11-20T15:22:34,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/773a95e02580483bba88460af8d367c7 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/773a95e02580483bba88460af8d367c7 2024-11-20T15:22:34,355 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/773a95e02580483bba88460af8d367c7, entries=150, sequenceid=408, filesize=12.0 K 2024-11-20T15:22:34,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/1e0545aa6ae249fd8d46f79f360f7b8c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1e0545aa6ae249fd8d46f79f360f7b8c 2024-11-20T15:22:34,369 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1e0545aa6ae249fd8d46f79f360f7b8c, entries=150, sequenceid=408, filesize=12.0 K 2024-11-20T15:22:34,370 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 6e158976a217b84d83e0feeee3ad6faf in 1063ms, sequenceid=408, compaction requested=false 2024-11-20T15:22:34,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:34,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:34,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=96 2024-11-20T15:22:34,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=96 2024-11-20T15:22:34,376 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-20T15:22:34,376 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8470 sec 2024-11-20T15:22:34,381 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees in 1.8580 sec 2024-11-20T15:22:34,530 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T15:22:34,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:34,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:34,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:34,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:34,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:34,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-20T15:22:34,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:34,551 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/412c24043e774096ac557af986ef4eaf is 50, key is test_row_0/A:col10/1732116153494/Put/seqid=0 2024-11-20T15:22:34,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742183_1359 (size=14741) 2024-11-20T15:22:34,594 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/412c24043e774096ac557af986ef4eaf 2024-11-20T15:22:34,616 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/0a3032dde91d4dd6918c61169afa324b is 50, key is test_row_0/B:col10/1732116153494/Put/seqid=0 2024-11-20T15:22:34,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T15:22:34,629 INFO [Thread-1245 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 95 completed 2024-11-20T15:22:34,631 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:34,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=97, table=TestAcidGuarantees 2024-11-20T15:22:34,635 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=97, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=97, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:34,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-20T15:22:34,636 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=97, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=97, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:34,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:34,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:34,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116214633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:34,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:34,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116214633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:34,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:34,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116214634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:34,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:34,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116214642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:34,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:34,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116214643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:34,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742184_1360 (size=12301) 2024-11-20T15:22:34,675 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/0a3032dde91d4dd6918c61169afa324b 2024-11-20T15:22:34,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/1afd48aba4d649cfa446bb9ae5375033 is 50, key is test_row_0/C:col10/1732116153494/Put/seqid=0 2024-11-20T15:22:34,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742185_1361 (size=12301) 2024-11-20T15:22:34,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/1afd48aba4d649cfa446bb9ae5375033 2024-11-20T15:22:34,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-20T15:22:34,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/412c24043e774096ac557af986ef4eaf as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/412c24043e774096ac557af986ef4eaf 2024-11-20T15:22:34,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:34,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116214744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:34,769 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/412c24043e774096ac557af986ef4eaf, entries=200, sequenceid=425, filesize=14.4 K 2024-11-20T15:22:34,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:34,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/0a3032dde91d4dd6918c61169afa324b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/0a3032dde91d4dd6918c61169afa324b 2024-11-20T15:22:34,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116214748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:34,777 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/0a3032dde91d4dd6918c61169afa324b, entries=150, sequenceid=425, filesize=12.0 K 2024-11-20T15:22:34,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/1afd48aba4d649cfa446bb9ae5375033 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1afd48aba4d649cfa446bb9ae5375033 2024-11-20T15:22:34,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:34,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116214770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:34,788 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:34,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=98 2024-11-20T15:22:34,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:34,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:34,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:34,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] handler.RSProcedureHandler(58): pid=98 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:34,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=98 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:34,790 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1afd48aba4d649cfa446bb9ae5375033, entries=150, sequenceid=425, filesize=12.0 K 2024-11-20T15:22:34,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=98 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:34,792 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 6e158976a217b84d83e0feeee3ad6faf in 262ms, sequenceid=425, compaction requested=true 2024-11-20T15:22:34,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:34,793 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:34,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:34,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:34,794 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:34,794 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40263 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:34,794 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/A is initiating minor compaction (all files) 2024-11-20T15:22:34,794 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/A in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:34,794 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/1753df2ece4f4f4288ced5eb34838475, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/ad4798377630477fbc57230ce8353f82, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/412c24043e774096ac557af986ef4eaf] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=39.3 K 2024-11-20T15:22:34,795 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1753df2ece4f4f4288ced5eb34838475, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1732116152204 2024-11-20T15:22:34,796 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad4798377630477fbc57230ce8353f82, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732116152321 2024-11-20T15:22:34,796 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:34,796 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/B is initiating minor compaction (all files) 2024-11-20T15:22:34,796 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/B in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:34,796 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/be147702d8ea46ea8535b68ad37a7a8e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/773a95e02580483bba88460af8d367c7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/0a3032dde91d4dd6918c61169afa324b] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=36.9 K 2024-11-20T15:22:34,796 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 412c24043e774096ac557af986ef4eaf, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1732116153473 2024-11-20T15:22:34,797 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting be147702d8ea46ea8535b68ad37a7a8e, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1732116152204 2024-11-20T15:22:34,799 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 773a95e02580483bba88460af8d367c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732116152321 2024-11-20T15:22:34,799 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a3032dde91d4dd6918c61169afa324b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1732116153473 2024-11-20T15:22:34,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:34,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:34,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:34,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:34,811 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#A#compaction#315 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:34,812 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/70acd4441bd9418192205ace0df01930 is 50, key is test_row_0/A:col10/1732116153494/Put/seqid=0 2024-11-20T15:22:34,820 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#B#compaction#316 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:34,821 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/5b73e865e04f4246a4db53b6c224af37 is 50, key is test_row_0/B:col10/1732116153494/Put/seqid=0 2024-11-20T15:22:34,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742186_1362 (size=13323) 2024-11-20T15:22:34,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742187_1363 (size=13323) 2024-11-20T15:22:34,903 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/5b73e865e04f4246a4db53b6c224af37 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/5b73e865e04f4246a4db53b6c224af37 2024-11-20T15:22:34,909 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/B of 6e158976a217b84d83e0feeee3ad6faf into 5b73e865e04f4246a4db53b6c224af37(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:34,909 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:34,909 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/B, priority=13, startTime=1732116154794; duration=0sec 2024-11-20T15:22:34,909 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:34,909 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:B 2024-11-20T15:22:34,909 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:34,910 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:34,910 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/C is initiating minor compaction (all files) 2024-11-20T15:22:34,910 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/C in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:34,911 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/0d0cf5814a3b4ee3859045d635866a40, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1e0545aa6ae249fd8d46f79f360f7b8c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1afd48aba4d649cfa446bb9ae5375033] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=36.9 K 2024-11-20T15:22:34,911 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d0cf5814a3b4ee3859045d635866a40, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=385, earliestPutTs=1732116152204 2024-11-20T15:22:34,912 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e0545aa6ae249fd8d46f79f360f7b8c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732116152321 2024-11-20T15:22:34,912 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 1afd48aba4d649cfa446bb9ae5375033, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1732116153473 2024-11-20T15:22:34,923 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
6e158976a217b84d83e0feeee3ad6faf#C#compaction#317 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:34,924 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/2460ee3b641b4a7a90c074076bff73ec is 50, key is test_row_0/C:col10/1732116153494/Put/seqid=0 2024-11-20T15:22:34,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-20T15:22:34,942 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:34,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=98 2024-11-20T15:22:34,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:34,948 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T15:22:34,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:34,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:34,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:34,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:34,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:34,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:34,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/ff565862623e4f2194581f51a238967a is 50, key is test_row_0/A:col10/1732116154630/Put/seqid=0 2024-11-20T15:22:34,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:34,977 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:34,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742188_1364 (size=13323) 2024-11-20T15:22:35,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742189_1365 (size=12301) 2024-11-20T15:22:35,013 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/ff565862623e4f2194581f51a238967a 2024-11-20T15:22:35,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116215031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116215034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/b6b592202abf41678cb52026936f044b is 50, key is test_row_0/B:col10/1732116154630/Put/seqid=0 2024-11-20T15:22:35,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116215035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742190_1366 (size=12301) 2024-11-20T15:22:35,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116215141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116215141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116215141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-20T15:22:35,274 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/70acd4441bd9418192205ace0df01930 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/70acd4441bd9418192205ace0df01930 2024-11-20T15:22:35,281 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/A of 6e158976a217b84d83e0feeee3ad6faf into 70acd4441bd9418192205ace0df01930(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:35,281 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:35,281 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/A, priority=13, startTime=1732116154792; duration=0sec 2024-11-20T15:22:35,282 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:35,282 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:A 2024-11-20T15:22:35,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116215347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116215348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116215348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,386 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/2460ee3b641b4a7a90c074076bff73ec as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/2460ee3b641b4a7a90c074076bff73ec 2024-11-20T15:22:35,400 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/C of 6e158976a217b84d83e0feeee3ad6faf into 2460ee3b641b4a7a90c074076bff73ec(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:35,400 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:35,400 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/C, priority=13, startTime=1732116154803; duration=0sec 2024-11-20T15:22:35,400 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:35,400 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:C 2024-11-20T15:22:35,490 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/b6b592202abf41678cb52026936f044b 2024-11-20T15:22:35,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/940b7d7256e14e8bb502e00d47fcd20c is 50, key is test_row_0/C:col10/1732116154630/Put/seqid=0 2024-11-20T15:22:35,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742191_1367 (size=12301) 2024-11-20T15:22:35,554 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/940b7d7256e14e8bb502e00d47fcd20c 2024-11-20T15:22:35,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/ff565862623e4f2194581f51a238967a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/ff565862623e4f2194581f51a238967a 2024-11-20T15:22:35,575 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/ff565862623e4f2194581f51a238967a, entries=150, sequenceid=448, filesize=12.0 K 2024-11-20T15:22:35,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/b6b592202abf41678cb52026936f044b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b6b592202abf41678cb52026936f044b 2024-11-20T15:22:35,584 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b6b592202abf41678cb52026936f044b, entries=150, sequenceid=448, filesize=12.0 K 2024-11-20T15:22:35,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/940b7d7256e14e8bb502e00d47fcd20c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/940b7d7256e14e8bb502e00d47fcd20c 2024-11-20T15:22:35,590 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/940b7d7256e14e8bb502e00d47fcd20c, entries=150, sequenceid=448, filesize=12.0 K 2024-11-20T15:22:35,591 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=87.22 KB/89310 for 6e158976a217b84d83e0feeee3ad6faf in 643ms, sequenceid=448, compaction requested=false 2024-11-20T15:22:35,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:35,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
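On the client side, RegionTooBusyException is treated as retriable: the stock HBase client retries it internally (up to hbase.client.retries.number, with back-off), which is presumably why the same test writer connections keep reappearing above with new callIds rather than failing outright. A hand-rolled loop of the same shape is sketched below; the table name, row, family and qualifier are taken from the log, while the retry count and sleep are arbitrary, and depending on client configuration the exception may surface wrapped by the client's own retry machinery rather than directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);               // the client retries internally first
                    break;
                } catch (RegionTooBusyException e) {
                    if (attempt >= 10) {
                        throw e;                  // region never freed up; give up
                    }
                    Thread.sleep(100L * attempt); // back off while the memstore flushes
                }
            }
        }
    }
}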
2024-11-20T15:22:35,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=98 2024-11-20T15:22:35,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=98 2024-11-20T15:22:35,595 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-20T15:22:35,595 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 956 msec 2024-11-20T15:22:35,597 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=97, table=TestAcidGuarantees in 964 msec 2024-11-20T15:22:35,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:35,652 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T15:22:35,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:35,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:35,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:35,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:35,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:35,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:35,669 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/13fd15487c4b40faab45d0adb9c34c9d is 50, key is test_row_0/A:col10/1732116155650/Put/seqid=0 2024-11-20T15:22:35,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116215679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116215680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116215682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116215684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116215685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742192_1368 (size=14741) 2024-11-20T15:22:35,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-20T15:22:35,740 INFO [Thread-1245 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 97 completed 2024-11-20T15:22:35,741 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:35,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=99, table=TestAcidGuarantees 2024-11-20T15:22:35,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-20T15:22:35,743 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=99, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=99, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:35,744 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=99, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=99, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:35,744 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:35,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116215788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116215790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116215790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116215791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116215793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-20T15:22:35,896 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=100 2024-11-20T15:22:35,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:35,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:35,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:35,897 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] handler.RSProcedureHandler(58): pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
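The ERROR above is the expected interplay between the FlushTableProcedure (pid=99/100) and the flush that MemStoreFlusher.0 already started at 15:22:35,652: the region reports "NOT flushing ... as already flushing", so the remote callable fails fast instead of blocking a region-server handler, and the master reschedules it. A simplified sketch of that fail-fast shape, with illustrative names rather than the actual FlushRegionCallable source:

import java.io.IOException;

// Fail fast when a flush is already running, so the master-side procedure retries later;
// simplified, illustrative names only.
final class FlushRegionCallableSketch {
    interface Region {
        /** Attempt a flush; returns false when another flush is already in progress. */
        boolean flushIfIdle();
        String describe();
    }

    static void doCall(Region region) throws IOException {
        if (!region.flushIfIdle()) {
            // Matches the "Unable to complete flush {ENCODED => ...}" entries above.
            throw new IOException("Unable to complete flush " + region.describe());
        }
    }
}

The master records "Remote procedure failed, pid=100" and dispatches it again, which is why the same pid reappears at 15:22:36,051 and 15:22:36,205 below until a flush runs to completion.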
2024-11-20T15:22:35,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:35,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=100 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:35,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116215994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116215995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:35,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:35,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116215996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116215997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,002 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116216000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-20T15:22:36,049 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=100 2024-11-20T15:22:36,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:36,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:36,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:36,051 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] handler.RSProcedureHandler(58): pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:36,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=100 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/13fd15487c4b40faab45d0adb9c34c9d 2024-11-20T15:22:36,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/caa09c1cde644e7bb85cfe217ced3d80 is 50, key is test_row_0/B:col10/1732116155650/Put/seqid=0 2024-11-20T15:22:36,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742193_1369 (size=12301) 2024-11-20T15:22:36,172 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/caa09c1cde644e7bb85cfe217ced3d80 2024-11-20T15:22:36,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/55dcb86c9aed46878dfee6f4f1b24747 is 50, key is test_row_0/C:col10/1732116155650/Put/seqid=0 2024-11-20T15:22:36,204 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,205 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=100 2024-11-20T15:22:36,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:36,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:36,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:36,205 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] handler.RSProcedureHandler(58): pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=100 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742194_1370 (size=12301) 2024-11-20T15:22:36,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/55dcb86c9aed46878dfee6f4f1b24747 2024-11-20T15:22:36,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/13fd15487c4b40faab45d0adb9c34c9d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/13fd15487c4b40faab45d0adb9c34c9d 2024-11-20T15:22:36,231 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/13fd15487c4b40faab45d0adb9c34c9d, entries=200, sequenceid=467, filesize=14.4 K 2024-11-20T15:22:36,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/caa09c1cde644e7bb85cfe217ced3d80 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/caa09c1cde644e7bb85cfe217ced3d80 2024-11-20T15:22:36,237 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/caa09c1cde644e7bb85cfe217ced3d80, entries=150, sequenceid=467, filesize=12.0 K 2024-11-20T15:22:36,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/55dcb86c9aed46878dfee6f4f1b24747 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/55dcb86c9aed46878dfee6f4f1b24747 2024-11-20T15:22:36,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/55dcb86c9aed46878dfee6f4f1b24747, entries=150, sequenceid=467, filesize=12.0 K 2024-11-20T15:22:36,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 6e158976a217b84d83e0feeee3ad6faf in 592ms, sequenceid=467, compaction requested=true 2024-11-20T15:22:36,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:36,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:36,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:36,244 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:36,246 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:36,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:36,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:36,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:36,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:36,246 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40365 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:36,246 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/A is initiating minor 
compaction (all files) 2024-11-20T15:22:36,246 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/A in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:36,247 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/70acd4441bd9418192205ace0df01930, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/ff565862623e4f2194581f51a238967a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/13fd15487c4b40faab45d0adb9c34c9d] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=39.4 K 2024-11-20T15:22:36,247 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:36,247 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/B is initiating minor compaction (all files) 2024-11-20T15:22:36,247 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/B in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:36,247 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/5b73e865e04f4246a4db53b6c224af37, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b6b592202abf41678cb52026936f044b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/caa09c1cde644e7bb85cfe217ced3d80] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=37.0 K 2024-11-20T15:22:36,247 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70acd4441bd9418192205ace0df01930, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1732116153473 2024-11-20T15:22:36,248 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b73e865e04f4246a4db53b6c224af37, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1732116153473 2024-11-20T15:22:36,248 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff565862623e4f2194581f51a238967a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732116154630 2024-11-20T15:22:36,248 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b6b592202abf41678cb52026936f044b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732116154630 2024-11-20T15:22:36,248 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13fd15487c4b40faab45d0adb9c34c9d, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1732116154994 2024-11-20T15:22:36,249 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting caa09c1cde644e7bb85cfe217ced3d80, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1732116155021 2024-11-20T15:22:36,276 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#B#compaction#324 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:36,277 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#A#compaction#325 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:36,277 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/2449b985e56d46e6b0294552c726258e is 50, key is test_row_0/B:col10/1732116155650/Put/seqid=0 2024-11-20T15:22:36,278 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/3eec693448ab4edcbfa2e916e925eeb5 is 50, key is test_row_0/A:col10/1732116155650/Put/seqid=0 2024-11-20T15:22:36,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T15:22:36,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:36,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:36,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:36,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:36,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:36,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:36,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:36,321 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/5e1bb3325ce6423d8ead5f2854c012a5 is 50, key is test_row_0/A:col10/1732116156300/Put/seqid=0 2024-11-20T15:22:36,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742195_1371 (size=13425) 2024-11-20T15:22:36,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116216324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116216328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116216329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116216330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742196_1372 (size=13425) 2024-11-20T15:22:36,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116216340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-20T15:22:36,352 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/3eec693448ab4edcbfa2e916e925eeb5 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/3eec693448ab4edcbfa2e916e925eeb5 2024-11-20T15:22:36,358 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,358 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=100 2024-11-20T15:22:36,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:36,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:36,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:36,358 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] handler.RSProcedureHandler(58): pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,359 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/A of 6e158976a217b84d83e0feeee3ad6faf into 3eec693448ab4edcbfa2e916e925eeb5(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:36,359 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:36,359 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/A, priority=13, startTime=1732116156244; duration=0sec 2024-11-20T15:22:36,359 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:36,359 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:A 2024-11-20T15:22:36,359 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:36,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=100 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,360 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:36,360 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/C is initiating minor compaction (all files) 2024-11-20T15:22:36,360 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/C in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:36,361 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/2460ee3b641b4a7a90c074076bff73ec, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/940b7d7256e14e8bb502e00d47fcd20c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/55dcb86c9aed46878dfee6f4f1b24747] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=37.0 K 2024-11-20T15:22:36,361 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2460ee3b641b4a7a90c074076bff73ec, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1732116153473 2024-11-20T15:22:36,362 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 940b7d7256e14e8bb502e00d47fcd20c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732116154630 2024-11-20T15:22:36,362 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55dcb86c9aed46878dfee6f4f1b24747, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1732116155021 2024-11-20T15:22:36,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742197_1373 (size=14741) 2024-11-20T15:22:36,398 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/5e1bb3325ce6423d8ead5f2854c012a5 2024-11-20T15:22:36,402 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#C#compaction#327 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:36,402 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/26dd0f1f85184c189f89bf0d141854e8 is 50, key is test_row_0/C:col10/1732116155650/Put/seqid=0 2024-11-20T15:22:36,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/b5414fc546ee4783840fba10b33ca262 is 50, key is test_row_0/B:col10/1732116156300/Put/seqid=0 2024-11-20T15:22:36,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116216432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116216434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116216435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116216436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116216445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742198_1374 (size=13425) 2024-11-20T15:22:36,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742199_1375 (size=12301) 2024-11-20T15:22:36,510 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,510 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=100 2024-11-20T15:22:36,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:36,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:36,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:36,511 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] handler.RSProcedureHandler(58): pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=100 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116216639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116216640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116216643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116216646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116216651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,664 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=100 2024-11-20T15:22:36,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:36,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:36,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:36,665 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] handler.RSProcedureHandler(58): pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:36,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=100 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,731 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/2449b985e56d46e6b0294552c726258e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/2449b985e56d46e6b0294552c726258e 2024-11-20T15:22:36,736 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/B of 6e158976a217b84d83e0feeee3ad6faf into 2449b985e56d46e6b0294552c726258e(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:36,736 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:36,736 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/B, priority=13, startTime=1732116156244; duration=0sec 2024-11-20T15:22:36,736 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:36,736 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:B 2024-11-20T15:22:36,816 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,817 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=100 2024-11-20T15:22:36,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:36,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:36,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:36,817 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] handler.RSProcedureHandler(58): pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=100 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-20T15:22:36,873 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/26dd0f1f85184c189f89bf0d141854e8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/26dd0f1f85184c189f89bf0d141854e8 2024-11-20T15:22:36,879 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/C of 6e158976a217b84d83e0feeee3ad6faf into 26dd0f1f85184c189f89bf0d141854e8(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:36,879 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:36,879 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/C, priority=13, startTime=1732116156246; duration=0sec 2024-11-20T15:22:36,879 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:36,879 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:C 2024-11-20T15:22:36,891 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/b5414fc546ee4783840fba10b33ca262 2024-11-20T15:22:36,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/4ea21120ec1c4635b5941af83180bd9b is 50, key is test_row_0/C:col10/1732116156300/Put/seqid=0 2024-11-20T15:22:36,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742200_1376 (size=12301) 2024-11-20T15:22:36,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116216945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116216946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116216949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116216954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:36,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116216959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,970 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:36,970 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=100 2024-11-20T15:22:36,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:36,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:36,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:36,970 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] handler.RSProcedureHandler(58): pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:36,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=100 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:37,122 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:37,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=100 2024-11-20T15:22:37,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:37,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:37,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:37,129 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] handler.RSProcedureHandler(58): pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:37,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:37,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=100 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:37,282 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:37,282 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=100 2024-11-20T15:22:37,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:37,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:37,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:37,283 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] handler.RSProcedureHandler(58): pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:37,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=100 java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:37,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=100 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:37,342 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/4ea21120ec1c4635b5941af83180bd9b 2024-11-20T15:22:37,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/5e1bb3325ce6423d8ead5f2854c012a5 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/5e1bb3325ce6423d8ead5f2854c012a5 2024-11-20T15:22:37,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/5e1bb3325ce6423d8ead5f2854c012a5, entries=200, sequenceid=487, filesize=14.4 K 2024-11-20T15:22:37,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/b5414fc546ee4783840fba10b33ca262 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b5414fc546ee4783840fba10b33ca262 2024-11-20T15:22:37,358 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b5414fc546ee4783840fba10b33ca262, entries=150, 
sequenceid=487, filesize=12.0 K 2024-11-20T15:22:37,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/4ea21120ec1c4635b5941af83180bd9b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/4ea21120ec1c4635b5941af83180bd9b 2024-11-20T15:22:37,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/4ea21120ec1c4635b5941af83180bd9b, entries=150, sequenceid=487, filesize=12.0 K 2024-11-20T15:22:37,367 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 6e158976a217b84d83e0feeee3ad6faf in 1062ms, sequenceid=487, compaction requested=false 2024-11-20T15:22:37,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:37,435 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:37,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=100 2024-11-20T15:22:37,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:37,437 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T15:22:37,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:37,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:37,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:37,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:37,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:37,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:37,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/450d035a0e9149bcb421c5a64e839192 is 50, key is test_row_0/A:col10/1732116156326/Put/seqid=0 2024-11-20T15:22:37,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. as already flushing 2024-11-20T15:22:37,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:37,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742201_1377 (size=12301) 2024-11-20T15:22:37,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:37,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:37,488 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=507 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/450d035a0e9149bcb421c5a64e839192 2024-11-20T15:22:37,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116217480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:37,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116217481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:37,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:37,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116217482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:37,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:37,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116217486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:37,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:37,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116217487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:37,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/95cbcb2adcb04c4a8a8a77df13701644 is 50, key is test_row_0/B:col10/1732116156326/Put/seqid=0 2024-11-20T15:22:37,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742202_1378 (size=12301) 2024-11-20T15:22:37,558 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=507 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/95cbcb2adcb04c4a8a8a77df13701644 2024-11-20T15:22:37,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/efed1103c074405cac2c92e5fc898ed8 is 50, key is test_row_0/C:col10/1732116156326/Put/seqid=0 2024-11-20T15:22:37,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:37,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46040 deadline: 1732116217590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:37,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:37,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45990 deadline: 1732116217591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:37,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:37,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46052 deadline: 1732116217591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:37,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:37,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46028 deadline: 1732116217593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:37,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:37,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45992 deadline: 1732116217596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:37,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742203_1379 (size=12301) 2024-11-20T15:22:37,623 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=507 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/efed1103c074405cac2c92e5fc898ed8 2024-11-20T15:22:37,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/450d035a0e9149bcb421c5a64e839192 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/450d035a0e9149bcb421c5a64e839192 2024-11-20T15:22:37,643 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/450d035a0e9149bcb421c5a64e839192, entries=150, sequenceid=507, filesize=12.0 K 2024-11-20T15:22:37,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/95cbcb2adcb04c4a8a8a77df13701644 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/95cbcb2adcb04c4a8a8a77df13701644 2024-11-20T15:22:37,649 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/95cbcb2adcb04c4a8a8a77df13701644, entries=150, sequenceid=507, filesize=12.0 K 2024-11-20T15:22:37,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/efed1103c074405cac2c92e5fc898ed8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/efed1103c074405cac2c92e5fc898ed8 2024-11-20T15:22:37,655 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/efed1103c074405cac2c92e5fc898ed8, entries=150, sequenceid=507, filesize=12.0 K 2024-11-20T15:22:37,656 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 6e158976a217b84d83e0feeee3ad6faf in 219ms, sequenceid=507, compaction requested=true 2024-11-20T15:22:37,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:37,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:37,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=100 2024-11-20T15:22:37,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=100 2024-11-20T15:22:37,659 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-20T15:22:37,659 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9130 sec 2024-11-20T15:22:37,661 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=99, table=TestAcidGuarantees in 1.9190 sec 2024-11-20T15:22:37,784 DEBUG [Thread-1250 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d68f787 to 127.0.0.1:62338 2024-11-20T15:22:37,784 DEBUG [Thread-1250 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:37,785 DEBUG [Thread-1254 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1730a60f to 127.0.0.1:62338 2024-11-20T15:22:37,785 DEBUG [Thread-1254 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:37,787 DEBUG [Thread-1252 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10e6bf6a to 127.0.0.1:62338 2024-11-20T15:22:37,787 DEBUG [Thread-1252 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:37,788 DEBUG [Thread-1248 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6a0e9c8f to 127.0.0.1:62338 2024-11-20T15:22:37,788 DEBUG [Thread-1248 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:37,793 DEBUG [Thread-1246 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0eb04aeb to 127.0.0.1:62338 2024-11-20T15:22:37,793 DEBUG [Thread-1246 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:37,798 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T15:22:37,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:37,799 DEBUG [Thread-1235 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6862e3ce to 127.0.0.1:62338 2024-11-20T15:22:37,799 DEBUG [Thread-1235 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:37,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:37,800 DEBUG [Thread-1237 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d296fed to 127.0.0.1:62338 2024-11-20T15:22:37,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:37,800 DEBUG [Thread-1237 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:37,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:37,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:37,800 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:37,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:37,805 DEBUG [Thread-1241 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x43f04e0e to 127.0.0.1:62338 2024-11-20T15:22:37,805 DEBUG [Thread-1241 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:37,805 DEBUG [Thread-1239 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08d0caa5 to 127.0.0.1:62338 2024-11-20T15:22:37,806 DEBUG [Thread-1239 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:37,807 DEBUG [Thread-1243 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x560ec309 to 127.0.0.1:62338 2024-11-20T15:22:37,807 DEBUG [Thread-1243 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:37,809 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/2620c87326004c71bcf2256546b25cf8 is 50, key is test_row_0/A:col10/1732116157481/Put/seqid=0 2024-11-20T15:22:37,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742204_1380 (size=12301) 2024-11-20T15:22:37,834 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=528 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/2620c87326004c71bcf2256546b25cf8 2024-11-20T15:22:37,841 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/b60369c03a3440c192925383d9ce7bec is 50, key is test_row_0/B:col10/1732116157481/Put/seqid=0 2024-11-20T15:22:37,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742205_1381 (size=12301) 2024-11-20T15:22:37,846 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=528 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/b60369c03a3440c192925383d9ce7bec 2024-11-20T15:22:37,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-20T15:22:37,848 INFO [Thread-1245 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 99 completed 2024-11-20T15:22:37,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T15:22:37,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76 2024-11-20T15:22:37,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 86 2024-11-20T15:22:37,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 73 2024-11-20T15:22:37,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 106 2024-11-20T15:22:37,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 78 2024-11-20T15:22:37,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T15:22:37,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4945 2024-11-20T15:22:37,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4986 2024-11-20T15:22:37,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4928 2024-11-20T15:22:37,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5061 2024-11-20T15:22:37,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4958 2024-11-20T15:22:37,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T15:22:37,848 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T15:22:37,848 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a569490 to 127.0.0.1:62338 2024-11-20T15:22:37,848 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:22:37,849 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T15:22:37,849 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T15:22:37,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=101, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:37,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-11-20T15:22:37,852 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116157852"}]},"ts":"1732116157852"} 2024-11-20T15:22:37,855 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T15:22:37,857 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T15:22:37,858 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T15:22:37,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/a36c84fa0e984aaa904cef09e98181fd is 50, key is test_row_0/C:col10/1732116157481/Put/seqid=0 2024-11-20T15:22:37,860 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, 
region=6e158976a217b84d83e0feeee3ad6faf, UNASSIGN}] 2024-11-20T15:22:37,861 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e158976a217b84d83e0feeee3ad6faf, UNASSIGN 2024-11-20T15:22:37,863 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=6e158976a217b84d83e0feeee3ad6faf, regionState=CLOSING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:22:37,864 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T15:22:37,864 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE; CloseRegionProcedure 6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:22:37,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742206_1382 (size=12301) 2024-11-20T15:22:37,865 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=528 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/a36c84fa0e984aaa904cef09e98181fd 2024-11-20T15:22:37,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/2620c87326004c71bcf2256546b25cf8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/2620c87326004c71bcf2256546b25cf8 2024-11-20T15:22:37,883 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/2620c87326004c71bcf2256546b25cf8, entries=150, sequenceid=528, filesize=12.0 K 2024-11-20T15:22:37,885 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/b60369c03a3440c192925383d9ce7bec as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b60369c03a3440c192925383d9ce7bec 2024-11-20T15:22:37,891 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b60369c03a3440c192925383d9ce7bec, entries=150, sequenceid=528, filesize=12.0 K 2024-11-20T15:22:37,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/a36c84fa0e984aaa904cef09e98181fd as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a36c84fa0e984aaa904cef09e98181fd 2024-11-20T15:22:37,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a36c84fa0e984aaa904cef09e98181fd, entries=150, sequenceid=528, filesize=12.0 K 2024-11-20T15:22:37,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=20.13 KB/20610 for 6e158976a217b84d83e0feeee3ad6faf in 100ms, sequenceid=528, compaction requested=true 2024-11-20T15:22:37,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:37,898 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:37,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:37,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:37,899 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:37,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:37,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:37,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e158976a217b84d83e0feeee3ad6faf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:37,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:37,900 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52768 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:37,900 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/A is initiating minor compaction (all files) 2024-11-20T15:22:37,900 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/A in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:37,900 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/3eec693448ab4edcbfa2e916e925eeb5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/5e1bb3325ce6423d8ead5f2854c012a5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/450d035a0e9149bcb421c5a64e839192, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/2620c87326004c71bcf2256546b25cf8] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=51.5 K 2024-11-20T15:22:37,900 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 3eec693448ab4edcbfa2e916e925eeb5, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1732116155021 2024-11-20T15:22:37,901 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50328 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:37,901 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/B is initiating minor compaction (all files) 2024-11-20T15:22:37,901 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/B in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
2024-11-20T15:22:37,901 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/2449b985e56d46e6b0294552c726258e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b5414fc546ee4783840fba10b33ca262, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/95cbcb2adcb04c4a8a8a77df13701644, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b60369c03a3440c192925383d9ce7bec] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=49.1 K 2024-11-20T15:22:37,901 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e1bb3325ce6423d8ead5f2854c012a5, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=487, earliestPutTs=1732116155678 2024-11-20T15:22:37,901 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2449b985e56d46e6b0294552c726258e, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1732116155021 2024-11-20T15:22:37,902 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 450d035a0e9149bcb421c5a64e839192, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=507, earliestPutTs=1732116156326 2024-11-20T15:22:37,902 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5414fc546ee4783840fba10b33ca262, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=487, earliestPutTs=1732116155678 2024-11-20T15:22:37,903 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95cbcb2adcb04c4a8a8a77df13701644, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=507, earliestPutTs=1732116156326 2024-11-20T15:22:37,903 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 2620c87326004c71bcf2256546b25cf8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1732116157481 2024-11-20T15:22:37,903 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting b60369c03a3440c192925383d9ce7bec, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1732116157481 2024-11-20T15:22:37,933 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#B#compaction#336 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:37,934 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/7fb82f85d3cf49f3bb25b3f457573fcb is 50, key is test_row_0/B:col10/1732116157481/Put/seqid=0 2024-11-20T15:22:37,944 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#A#compaction#337 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:37,945 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/4a4acba8e86145ee960fcb92e329b095 is 50, key is test_row_0/A:col10/1732116157481/Put/seqid=0 2024-11-20T15:22:37,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-11-20T15:22:37,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742207_1383 (size=13561) 2024-11-20T15:22:37,979 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/7fb82f85d3cf49f3bb25b3f457573fcb as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/7fb82f85d3cf49f3bb25b3f457573fcb 2024-11-20T15:22:37,985 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/B of 6e158976a217b84d83e0feeee3ad6faf into 7fb82f85d3cf49f3bb25b3f457573fcb(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:37,985 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:37,985 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/B, priority=12, startTime=1732116157899; duration=0sec 2024-11-20T15:22:37,985 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:37,985 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:B 2024-11-20T15:22:37,985 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:22:37,986 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50328 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:22:37,987 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 6e158976a217b84d83e0feeee3ad6faf/C is initiating minor compaction (all files) 2024-11-20T15:22:37,987 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6e158976a217b84d83e0feeee3ad6faf/C in TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:37,987 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/26dd0f1f85184c189f89bf0d141854e8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/4ea21120ec1c4635b5941af83180bd9b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/efed1103c074405cac2c92e5fc898ed8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a36c84fa0e984aaa904cef09e98181fd] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp, totalSize=49.1 K 2024-11-20T15:22:37,987 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26dd0f1f85184c189f89bf0d141854e8, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1732116155021 2024-11-20T15:22:37,988 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ea21120ec1c4635b5941af83180bd9b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=487, earliestPutTs=1732116155678 2024-11-20T15:22:37,988 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting efed1103c074405cac2c92e5fc898ed8, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=507, earliestPutTs=1732116156326 2024-11-20T15:22:37,989 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting a36c84fa0e984aaa904cef09e98181fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1732116157481 2024-11-20T15:22:37,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742208_1384 (size=13561) 2024-11-20T15:22:38,001 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e158976a217b84d83e0feeee3ad6faf#C#compaction#338 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:38,002 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/32de1bd5858243e59305a7703d06fdbc is 50, key is test_row_0/C:col10/1732116157481/Put/seqid=0 2024-11-20T15:22:38,008 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/4a4acba8e86145ee960fcb92e329b095 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/4a4acba8e86145ee960fcb92e329b095 2024-11-20T15:22:38,014 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/A of 6e158976a217b84d83e0feeee3ad6faf into 4a4acba8e86145ee960fcb92e329b095(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:38,015 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:38,015 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/A, priority=12, startTime=1732116157898; duration=0sec 2024-11-20T15:22:38,015 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:38,015 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:A 2024-11-20T15:22:38,016 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:38,016 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(124): Close 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:38,016 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T15:22:38,016 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1681): Closing 6e158976a217b84d83e0feeee3ad6faf, disabling compactions & flushes 2024-11-20T15:22:38,016 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1942): waiting for 1 compactions to complete for region TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:38,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742209_1385 (size=13561) 2024-11-20T15:22:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-11-20T15:22:38,386 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T15:22:38,433 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/32de1bd5858243e59305a7703d06fdbc as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/32de1bd5858243e59305a7703d06fdbc 2024-11-20T15:22:38,438 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6e158976a217b84d83e0feeee3ad6faf/C of 6e158976a217b84d83e0feeee3ad6faf into 32de1bd5858243e59305a7703d06fdbc(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:38,438 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:38,438 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:38,438 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. after waiting 0 ms 2024-11-20T15:22:38,438 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 2024-11-20T15:22:38,438 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(2837): Flushing 6e158976a217b84d83e0feeee3ad6faf 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-20T15:22:38,439 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=A 2024-11-20T15:22:38,439 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:38,439 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=B 2024-11-20T15:22:38,439 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:38,439 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6e158976a217b84d83e0feeee3ad6faf, store=C 2024-11-20T15:22:38,439 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:38,441 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:38,441 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf., storeName=6e158976a217b84d83e0feeee3ad6faf/C, priority=12, startTime=1732116157899; duration=0sec 2024-11-20T15:22:38,441 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:38,443 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e158976a217b84d83e0feeee3ad6faf:C 2024-11-20T15:22:38,444 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/10fa7ce5405344bfa41616de7feda1f2 is 50, key is test_row_0/A:col10/1732116157800/Put/seqid=0 2024-11-20T15:22:38,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742210_1386 (size=9857) 2024-11-20T15:22:38,452 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=537 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/10fa7ce5405344bfa41616de7feda1f2 2024-11-20T15:22:38,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-11-20T15:22:38,459 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/6a85a17f95c741569451bdb7f75be3f1 is 50, key is test_row_0/B:col10/1732116157800/Put/seqid=0 2024-11-20T15:22:38,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742211_1387 (size=9857) 2024-11-20T15:22:38,488 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=537 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/6a85a17f95c741569451bdb7f75be3f1 2024-11-20T15:22:38,495 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/b77f8b8cba0c4b918b9c4c4296409715 is 50, key is test_row_0/C:col10/1732116157800/Put/seqid=0 2024-11-20T15:22:38,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742212_1388 (size=9857) 2024-11-20T15:22:38,901 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=537 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/b77f8b8cba0c4b918b9c4c4296409715 2024-11-20T15:22:38,907 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/A/10fa7ce5405344bfa41616de7feda1f2 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/10fa7ce5405344bfa41616de7feda1f2 2024-11-20T15:22:38,914 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/10fa7ce5405344bfa41616de7feda1f2, entries=100, sequenceid=537, filesize=9.6 K 2024-11-20T15:22:38,915 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/B/6a85a17f95c741569451bdb7f75be3f1 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/6a85a17f95c741569451bdb7f75be3f1 2024-11-20T15:22:38,919 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/6a85a17f95c741569451bdb7f75be3f1, entries=100, sequenceid=537, filesize=9.6 K 2024-11-20T15:22:38,919 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/.tmp/C/b77f8b8cba0c4b918b9c4c4296409715 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/b77f8b8cba0c4b918b9c4c4296409715 2024-11-20T15:22:38,924 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/b77f8b8cba0c4b918b9c4c4296409715, entries=100, sequenceid=537, filesize=9.6 K 2024-11-20T15:22:38,925 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 6e158976a217b84d83e0feeee3ad6faf in 487ms, sequenceid=537, compaction requested=false 2024-11-20T15:22:38,926 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a2e64598dee34328993d86f469d7f50a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/27c54bc1fee5481bb0989c062ef351cb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d881268b5b62439fa1036f81b2457195, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a6851546ee864741a2f6077bc36ea4dc, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/75e3eb8ba0eb4fe3872b38117c2fa6e4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/967f937339d6420cbbf8886dc6c6cbf3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/853c5a73d6dc442f823675425a4f25f6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/5eb22de5fefb4ab9a583e5f1b7b37315, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/2e82b92e4bf543dba7cbcceac91203e3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d27becdf76e14a9d8042508cd8157ff2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/9b989a85e28d4598882edb4e546427de, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/bc6afc7556954058bf134a39493b24d6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/73f49dedbeac49d09120b08b91e3f545, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/cbd61c135fc94235af482703ceff0d87, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/146fef951ae54d63b5ce7f6fbd1dec99, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/b35073f8fa2e493b89adda3c188355a2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/54c38754b70349e7917de33e6016dd14, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a697347fe34749b3a35fea02e08b00f1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/18de11281ed4491197c258311e46b5e8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/b48b4a6982354a3da9a0be8f55648275, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/65c8741d3fb14baf860d162e070d37b6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a9f3638f745346d3b7304eefcbf07709, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/55509dd133ff4d22885b023716baa940, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d7ae2b4aced84d398049b4753452c10c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/e45fd3038fdd4cf482b3c4b60048bd03, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/0145124deeaa48bdb2b9749fc5cca07b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/339c6c485cda44c1b6ff2a8397f471b3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/1753df2ece4f4f4288ced5eb34838475, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/ad4798377630477fbc57230ce8353f82, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/412c24043e774096ac557af986ef4eaf, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/70acd4441bd9418192205ace0df01930, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/ff565862623e4f2194581f51a238967a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/13fd15487c4b40faab45d0adb9c34c9d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/3eec693448ab4edcbfa2e916e925eeb5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/5e1bb3325ce6423d8ead5f2854c012a5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/450d035a0e9149bcb421c5a64e839192, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/2620c87326004c71bcf2256546b25cf8] to archive 2024-11-20T15:22:38,931 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T15:22:38,934 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a2e64598dee34328993d86f469d7f50a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a2e64598dee34328993d86f469d7f50a 2024-11-20T15:22:38,935 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/27c54bc1fee5481bb0989c062ef351cb to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/27c54bc1fee5481bb0989c062ef351cb 2024-11-20T15:22:38,937 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d881268b5b62439fa1036f81b2457195 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d881268b5b62439fa1036f81b2457195 2024-11-20T15:22:38,940 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a6851546ee864741a2f6077bc36ea4dc to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a6851546ee864741a2f6077bc36ea4dc 2024-11-20T15:22:38,941 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/75e3eb8ba0eb4fe3872b38117c2fa6e4 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/75e3eb8ba0eb4fe3872b38117c2fa6e4 2024-11-20T15:22:38,948 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/967f937339d6420cbbf8886dc6c6cbf3 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/967f937339d6420cbbf8886dc6c6cbf3 2024-11-20T15:22:38,950 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/853c5a73d6dc442f823675425a4f25f6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/853c5a73d6dc442f823675425a4f25f6 2024-11-20T15:22:38,953 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/5eb22de5fefb4ab9a583e5f1b7b37315 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/5eb22de5fefb4ab9a583e5f1b7b37315 2024-11-20T15:22:38,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-11-20T15:22:38,957 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/2e82b92e4bf543dba7cbcceac91203e3 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/2e82b92e4bf543dba7cbcceac91203e3 2024-11-20T15:22:38,959 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d27becdf76e14a9d8042508cd8157ff2 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d27becdf76e14a9d8042508cd8157ff2 2024-11-20T15:22:38,960 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/9b989a85e28d4598882edb4e546427de to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/9b989a85e28d4598882edb4e546427de 2024-11-20T15:22:38,963 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/bc6afc7556954058bf134a39493b24d6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/bc6afc7556954058bf134a39493b24d6 2024-11-20T15:22:38,965 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/73f49dedbeac49d09120b08b91e3f545 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/73f49dedbeac49d09120b08b91e3f545 2024-11-20T15:22:38,966 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/cbd61c135fc94235af482703ceff0d87 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/cbd61c135fc94235af482703ceff0d87 2024-11-20T15:22:38,967 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/146fef951ae54d63b5ce7f6fbd1dec99 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/146fef951ae54d63b5ce7f6fbd1dec99 2024-11-20T15:22:38,968 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/b35073f8fa2e493b89adda3c188355a2 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/b35073f8fa2e493b89adda3c188355a2 2024-11-20T15:22:38,969 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/54c38754b70349e7917de33e6016dd14 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/54c38754b70349e7917de33e6016dd14 2024-11-20T15:22:38,970 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a697347fe34749b3a35fea02e08b00f1 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a697347fe34749b3a35fea02e08b00f1 2024-11-20T15:22:38,971 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/18de11281ed4491197c258311e46b5e8 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/18de11281ed4491197c258311e46b5e8 2024-11-20T15:22:38,973 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/b48b4a6982354a3da9a0be8f55648275 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/b48b4a6982354a3da9a0be8f55648275 2024-11-20T15:22:38,974 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/65c8741d3fb14baf860d162e070d37b6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/65c8741d3fb14baf860d162e070d37b6 2024-11-20T15:22:38,975 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a9f3638f745346d3b7304eefcbf07709 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/a9f3638f745346d3b7304eefcbf07709 2024-11-20T15:22:38,976 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/55509dd133ff4d22885b023716baa940 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/55509dd133ff4d22885b023716baa940 2024-11-20T15:22:38,977 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d7ae2b4aced84d398049b4753452c10c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/d7ae2b4aced84d398049b4753452c10c 2024-11-20T15:22:38,978 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/e45fd3038fdd4cf482b3c4b60048bd03 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/e45fd3038fdd4cf482b3c4b60048bd03 2024-11-20T15:22:38,979 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/0145124deeaa48bdb2b9749fc5cca07b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/0145124deeaa48bdb2b9749fc5cca07b 2024-11-20T15:22:38,980 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/339c6c485cda44c1b6ff2a8397f471b3 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/339c6c485cda44c1b6ff2a8397f471b3 2024-11-20T15:22:38,981 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/1753df2ece4f4f4288ced5eb34838475 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/1753df2ece4f4f4288ced5eb34838475 2024-11-20T15:22:38,982 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/ad4798377630477fbc57230ce8353f82 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/ad4798377630477fbc57230ce8353f82 2024-11-20T15:22:38,983 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/412c24043e774096ac557af986ef4eaf to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/412c24043e774096ac557af986ef4eaf 2024-11-20T15:22:38,985 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/70acd4441bd9418192205ace0df01930 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/70acd4441bd9418192205ace0df01930 2024-11-20T15:22:38,987 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/ff565862623e4f2194581f51a238967a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/ff565862623e4f2194581f51a238967a 2024-11-20T15:22:38,988 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/13fd15487c4b40faab45d0adb9c34c9d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/13fd15487c4b40faab45d0adb9c34c9d 2024-11-20T15:22:38,989 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/3eec693448ab4edcbfa2e916e925eeb5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/3eec693448ab4edcbfa2e916e925eeb5 2024-11-20T15:22:38,990 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/5e1bb3325ce6423d8ead5f2854c012a5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/5e1bb3325ce6423d8ead5f2854c012a5 2024-11-20T15:22:38,991 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/450d035a0e9149bcb421c5a64e839192 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/450d035a0e9149bcb421c5a64e839192 2024-11-20T15:22:38,992 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/2620c87326004c71bcf2256546b25cf8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/2620c87326004c71bcf2256546b25cf8 2024-11-20T15:22:38,993 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/ef132a4a4784415fae5d419afc323c31, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/48553e6cb8484825b80e1dfb740d517e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/e52fa97cd32d4c65bfe9d854b5241592, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3e394429d12246779e81eb1708e32fa5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3cb5856de83643e085ee3ed70f4ec069, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/5763d7236ae04e5a9e2669027d0e7876, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/9cd5f892e47640db8cfa737f3e269917, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/a88ea9dec5fa4d86aac4c5f41af577a6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/22f02b05372a481fb5e2cdf0425d3975, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/313d31bf05aa450cad02f11a041263f8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/108f0a8a384e46f98b37cb6018a0fdcb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b0a3ff9d3494446c93e384797ab6d8fe, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/fe4c6b2bb31a4a6797cbb7d9aa7609ff, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3f88e986172e45d6a02904c711e51c0a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/a8d6157658554820a9b43bdec60f1681, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/872777e7133b459e87b2d8a0fa8ce56d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/2a0cc61538a44b7f9f6b456be5a829dc, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/d8f953716cc44d35a8ed477e2329f946, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3e29441a10204f98b31e1c1b6b802e5e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/1a358e55823d4394a3198f77dd6a8c0f, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b6feb6cf74e24b34b1e00fe704744741, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/6c318c266e364b039ed59b2c57e9642e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/af9d113f02604b868df975653d4aa7d6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3db9db2db0274215a0539e4a9ca3c69c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/ceb93200d0214bf6916e22e641901449, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/de4d5b0a177a4648bc859e9897b5139f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/be147702d8ea46ea8535b68ad37a7a8e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/37a160ded8d54d97852bd1a60c5d02a4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/773a95e02580483bba88460af8d367c7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/5b73e865e04f4246a4db53b6c224af37, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/0a3032dde91d4dd6918c61169afa324b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b6b592202abf41678cb52026936f044b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/2449b985e56d46e6b0294552c726258e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/caa09c1cde644e7bb85cfe217ced3d80, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b5414fc546ee4783840fba10b33ca262, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/95cbcb2adcb04c4a8a8a77df13701644, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b60369c03a3440c192925383d9ce7bec] to archive 2024-11-20T15:22:38,994 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T15:22:38,995 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/ef132a4a4784415fae5d419afc323c31 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/ef132a4a4784415fae5d419afc323c31 2024-11-20T15:22:38,997 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/48553e6cb8484825b80e1dfb740d517e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/48553e6cb8484825b80e1dfb740d517e 2024-11-20T15:22:38,998 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/e52fa97cd32d4c65bfe9d854b5241592 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/e52fa97cd32d4c65bfe9d854b5241592 2024-11-20T15:22:38,999 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3e394429d12246779e81eb1708e32fa5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3e394429d12246779e81eb1708e32fa5 2024-11-20T15:22:39,000 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3cb5856de83643e085ee3ed70f4ec069 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3cb5856de83643e085ee3ed70f4ec069 2024-11-20T15:22:39,001 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/5763d7236ae04e5a9e2669027d0e7876 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/5763d7236ae04e5a9e2669027d0e7876 2024-11-20T15:22:39,002 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/9cd5f892e47640db8cfa737f3e269917 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/9cd5f892e47640db8cfa737f3e269917 2024-11-20T15:22:39,004 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/a88ea9dec5fa4d86aac4c5f41af577a6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/a88ea9dec5fa4d86aac4c5f41af577a6 2024-11-20T15:22:39,005 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/22f02b05372a481fb5e2cdf0425d3975 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/22f02b05372a481fb5e2cdf0425d3975 2024-11-20T15:22:39,006 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/313d31bf05aa450cad02f11a041263f8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/313d31bf05aa450cad02f11a041263f8 2024-11-20T15:22:39,007 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/108f0a8a384e46f98b37cb6018a0fdcb to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/108f0a8a384e46f98b37cb6018a0fdcb 2024-11-20T15:22:39,008 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b0a3ff9d3494446c93e384797ab6d8fe to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b0a3ff9d3494446c93e384797ab6d8fe 2024-11-20T15:22:39,009 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/fe4c6b2bb31a4a6797cbb7d9aa7609ff to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/fe4c6b2bb31a4a6797cbb7d9aa7609ff 2024-11-20T15:22:39,012 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3f88e986172e45d6a02904c711e51c0a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3f88e986172e45d6a02904c711e51c0a 2024-11-20T15:22:39,013 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/a8d6157658554820a9b43bdec60f1681 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/a8d6157658554820a9b43bdec60f1681 2024-11-20T15:22:39,015 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/872777e7133b459e87b2d8a0fa8ce56d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/872777e7133b459e87b2d8a0fa8ce56d 2024-11-20T15:22:39,016 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/2a0cc61538a44b7f9f6b456be5a829dc to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/2a0cc61538a44b7f9f6b456be5a829dc 2024-11-20T15:22:39,019 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/d8f953716cc44d35a8ed477e2329f946 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/d8f953716cc44d35a8ed477e2329f946 2024-11-20T15:22:39,020 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3e29441a10204f98b31e1c1b6b802e5e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3e29441a10204f98b31e1c1b6b802e5e 2024-11-20T15:22:39,021 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/1a358e55823d4394a3198f77dd6a8c0f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/1a358e55823d4394a3198f77dd6a8c0f 2024-11-20T15:22:39,022 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b6feb6cf74e24b34b1e00fe704744741 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b6feb6cf74e24b34b1e00fe704744741 2024-11-20T15:22:39,024 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/6c318c266e364b039ed59b2c57e9642e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/6c318c266e364b039ed59b2c57e9642e 2024-11-20T15:22:39,025 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/af9d113f02604b868df975653d4aa7d6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/af9d113f02604b868df975653d4aa7d6 2024-11-20T15:22:39,026 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3db9db2db0274215a0539e4a9ca3c69c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/3db9db2db0274215a0539e4a9ca3c69c 2024-11-20T15:22:39,028 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/ceb93200d0214bf6916e22e641901449 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/ceb93200d0214bf6916e22e641901449 2024-11-20T15:22:39,029 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/de4d5b0a177a4648bc859e9897b5139f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/de4d5b0a177a4648bc859e9897b5139f 2024-11-20T15:22:39,030 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/be147702d8ea46ea8535b68ad37a7a8e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/be147702d8ea46ea8535b68ad37a7a8e 2024-11-20T15:22:39,031 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/37a160ded8d54d97852bd1a60c5d02a4 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/37a160ded8d54d97852bd1a60c5d02a4 2024-11-20T15:22:39,033 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/773a95e02580483bba88460af8d367c7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/773a95e02580483bba88460af8d367c7 2024-11-20T15:22:39,034 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/5b73e865e04f4246a4db53b6c224af37 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/5b73e865e04f4246a4db53b6c224af37 2024-11-20T15:22:39,035 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/0a3032dde91d4dd6918c61169afa324b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/0a3032dde91d4dd6918c61169afa324b 2024-11-20T15:22:39,036 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b6b592202abf41678cb52026936f044b to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b6b592202abf41678cb52026936f044b 2024-11-20T15:22:39,037 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/2449b985e56d46e6b0294552c726258e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/2449b985e56d46e6b0294552c726258e 2024-11-20T15:22:39,038 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/caa09c1cde644e7bb85cfe217ced3d80 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/caa09c1cde644e7bb85cfe217ced3d80 2024-11-20T15:22:39,040 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b5414fc546ee4783840fba10b33ca262 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b5414fc546ee4783840fba10b33ca262 2024-11-20T15:22:39,041 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/95cbcb2adcb04c4a8a8a77df13701644 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/95cbcb2adcb04c4a8a8a77df13701644 2024-11-20T15:22:39,043 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b60369c03a3440c192925383d9ce7bec to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/b60369c03a3440c192925383d9ce7bec 2024-11-20T15:22:39,052 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/7b1a7daa31a5447098fcb91313ef1884, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1bcaca71aaa14b9baffabaea4bdcd4cb, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/7e18209cbc244cae9d30e0cfe439a6da, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/034b3ed523064665b7b76f8ed2814839, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/d87f8ebd722a49819de1abd679adaf41, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/024d7cc4047f434d90b18707b20a9fdf, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/43598885ecd1487b9c1a1cd09373f7bb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a0fa50127d4642adbfaec422ed4de404, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a2275c827a5940ea9ecdc22d62c606e4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/fded501adc31491783bcab67191756c2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/22375681ecf24b4781eb19ad0722e75f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1bdd3445e3a5441c9ee81690136b772c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/0b17f4a8a30f40d1a7d4695c8029ee34, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/9b3ae9d916f24d438b607e24c83f2dcf, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/f6e2b3626e1046f19dba5a97dd25e9d2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/eb533810fbd0441fad58b2b9a3777b63, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/2e36ca9a30f0471a89411e5191fb7241, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/d550fd6b6a174ea980d2f5d9b660ee72, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/c09d0c54dfe3455b90104a9b45bd961f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/98ea6dfc457e4df99e54550ea6153c1a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/54c8fcba1fbc4efab6a0cbd57db7836a, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/81ce4d93fa2b4e0480c7bfe121654419, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/9b4fe192dd91412f991cfd055d9ed2dd, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/0e2d34b919ee4df2b1a7f5af9865b93f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a259f6a896ab45a48a1446f3202f56ad, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/92c2e2eb7ebc4071918a584bdae88f06, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/0d0cf5814a3b4ee3859045d635866a40, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/3643962304bf4dc697dc1923d2c7200d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1e0545aa6ae249fd8d46f79f360f7b8c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/2460ee3b641b4a7a90c074076bff73ec, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1afd48aba4d649cfa446bb9ae5375033, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/940b7d7256e14e8bb502e00d47fcd20c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/26dd0f1f85184c189f89bf0d141854e8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/55dcb86c9aed46878dfee6f4f1b24747, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/4ea21120ec1c4635b5941af83180bd9b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/efed1103c074405cac2c92e5fc898ed8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a36c84fa0e984aaa904cef09e98181fd] to archive 2024-11-20T15:22:39,053 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T15:22:39,058 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/7b1a7daa31a5447098fcb91313ef1884 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/7b1a7daa31a5447098fcb91313ef1884 2024-11-20T15:22:39,061 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1bcaca71aaa14b9baffabaea4bdcd4cb to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1bcaca71aaa14b9baffabaea4bdcd4cb 2024-11-20T15:22:39,065 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/7e18209cbc244cae9d30e0cfe439a6da to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/7e18209cbc244cae9d30e0cfe439a6da 2024-11-20T15:22:39,066 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/034b3ed523064665b7b76f8ed2814839 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/034b3ed523064665b7b76f8ed2814839 2024-11-20T15:22:39,067 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/d87f8ebd722a49819de1abd679adaf41 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/d87f8ebd722a49819de1abd679adaf41 2024-11-20T15:22:39,069 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/024d7cc4047f434d90b18707b20a9fdf to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/024d7cc4047f434d90b18707b20a9fdf 2024-11-20T15:22:39,070 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/43598885ecd1487b9c1a1cd09373f7bb to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/43598885ecd1487b9c1a1cd09373f7bb 2024-11-20T15:22:39,071 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a0fa50127d4642adbfaec422ed4de404 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a0fa50127d4642adbfaec422ed4de404 2024-11-20T15:22:39,073 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a2275c827a5940ea9ecdc22d62c606e4 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a2275c827a5940ea9ecdc22d62c606e4 2024-11-20T15:22:39,075 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/fded501adc31491783bcab67191756c2 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/fded501adc31491783bcab67191756c2 2024-11-20T15:22:39,076 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/22375681ecf24b4781eb19ad0722e75f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/22375681ecf24b4781eb19ad0722e75f 2024-11-20T15:22:39,078 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1bdd3445e3a5441c9ee81690136b772c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1bdd3445e3a5441c9ee81690136b772c 2024-11-20T15:22:39,079 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/0b17f4a8a30f40d1a7d4695c8029ee34 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/0b17f4a8a30f40d1a7d4695c8029ee34 2024-11-20T15:22:39,080 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/9b3ae9d916f24d438b607e24c83f2dcf to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/9b3ae9d916f24d438b607e24c83f2dcf 2024-11-20T15:22:39,083 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/f6e2b3626e1046f19dba5a97dd25e9d2 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/f6e2b3626e1046f19dba5a97dd25e9d2 2024-11-20T15:22:39,089 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/eb533810fbd0441fad58b2b9a3777b63 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/eb533810fbd0441fad58b2b9a3777b63 2024-11-20T15:22:39,090 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/2e36ca9a30f0471a89411e5191fb7241 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/2e36ca9a30f0471a89411e5191fb7241 2024-11-20T15:22:39,092 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/d550fd6b6a174ea980d2f5d9b660ee72 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/d550fd6b6a174ea980d2f5d9b660ee72 2024-11-20T15:22:39,094 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/c09d0c54dfe3455b90104a9b45bd961f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/c09d0c54dfe3455b90104a9b45bd961f 2024-11-20T15:22:39,096 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/98ea6dfc457e4df99e54550ea6153c1a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/98ea6dfc457e4df99e54550ea6153c1a 2024-11-20T15:22:39,098 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/54c8fcba1fbc4efab6a0cbd57db7836a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/54c8fcba1fbc4efab6a0cbd57db7836a 2024-11-20T15:22:39,100 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/81ce4d93fa2b4e0480c7bfe121654419 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/81ce4d93fa2b4e0480c7bfe121654419 2024-11-20T15:22:39,102 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/9b4fe192dd91412f991cfd055d9ed2dd to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/9b4fe192dd91412f991cfd055d9ed2dd 2024-11-20T15:22:39,103 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/0e2d34b919ee4df2b1a7f5af9865b93f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/0e2d34b919ee4df2b1a7f5af9865b93f 2024-11-20T15:22:39,106 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a259f6a896ab45a48a1446f3202f56ad to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a259f6a896ab45a48a1446f3202f56ad 2024-11-20T15:22:39,110 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/92c2e2eb7ebc4071918a584bdae88f06 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/92c2e2eb7ebc4071918a584bdae88f06 2024-11-20T15:22:39,111 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/0d0cf5814a3b4ee3859045d635866a40 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/0d0cf5814a3b4ee3859045d635866a40 2024-11-20T15:22:39,114 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/3643962304bf4dc697dc1923d2c7200d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/3643962304bf4dc697dc1923d2c7200d 2024-11-20T15:22:39,115 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1e0545aa6ae249fd8d46f79f360f7b8c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1e0545aa6ae249fd8d46f79f360f7b8c 2024-11-20T15:22:39,120 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/2460ee3b641b4a7a90c074076bff73ec to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/2460ee3b641b4a7a90c074076bff73ec 2024-11-20T15:22:39,124 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1afd48aba4d649cfa446bb9ae5375033 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/1afd48aba4d649cfa446bb9ae5375033 2024-11-20T15:22:39,129 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/940b7d7256e14e8bb502e00d47fcd20c to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/940b7d7256e14e8bb502e00d47fcd20c 2024-11-20T15:22:39,132 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/26dd0f1f85184c189f89bf0d141854e8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/26dd0f1f85184c189f89bf0d141854e8 2024-11-20T15:22:39,136 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/55dcb86c9aed46878dfee6f4f1b24747 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/55dcb86c9aed46878dfee6f4f1b24747 2024-11-20T15:22:39,140 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/4ea21120ec1c4635b5941af83180bd9b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/4ea21120ec1c4635b5941af83180bd9b 2024-11-20T15:22:39,144 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/efed1103c074405cac2c92e5fc898ed8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/efed1103c074405cac2c92e5fc898ed8 2024-11-20T15:22:39,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a36c84fa0e984aaa904cef09e98181fd to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/a36c84fa0e984aaa904cef09e98181fd 2024-11-20T15:22:39,160 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/recovered.edits/540.seqid, newMaxSeqId=540, maxSeqId=1 2024-11-20T15:22:39,161 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf. 
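[Editor's note] The backup.HFileArchiver records above show each compacted store file being relocated from the region's data directory to the parallel location under archive/ while the store is closed. A minimal sketch of that path mapping, for illustration only (plain Java string handling, not the HBase API itself; the example path is copied from the record at 15:22:39,058):

    // Illustrative only: reproduces the data -> archive path mapping seen in the
    // HFileArchiver(596) records above.
    public final class ArchivePathSketch {
        // .../test-data/<id>/data/default/<table>/<region>/<cf>/<hfile>
        //  -> .../test-data/<id>/archive/data/default/<table>/<region>/<cf>/<hfile>
        static String toArchivePath(String storeFilePath) {
            return storeFilePath.replaceFirst("/data/default/", "/archive/data/default/");
        }

        public static void main(String[] args) {
            String src = "hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3"
                    + "/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/7b1a7daa31a5447098fcb91313ef1884";
            // Prints the destination path logged at 2024-11-20T15:22:39,058
            System.out.println(toArchivePath(src));
        }
    }
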
2024-11-20T15:22:39,162 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1635): Region close journal for 6e158976a217b84d83e0feeee3ad6faf: 2024-11-20T15:22:39,175 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(170): Closed 6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:39,175 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=6e158976a217b84d83e0feeee3ad6faf, regionState=CLOSED 2024-11-20T15:22:39,180 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-11-20T15:22:39,180 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; CloseRegionProcedure 6e158976a217b84d83e0feeee3ad6faf, server=0b62285ead89,33387,1732116069954 in 1.3130 sec 2024-11-20T15:22:39,184 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-20T15:22:39,184 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6e158976a217b84d83e0feeee3ad6faf, UNASSIGN in 1.3200 sec 2024-11-20T15:22:39,188 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-11-20T15:22:39,188 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.3260 sec 2024-11-20T15:22:39,192 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116159192"}]},"ts":"1732116159192"} 2024-11-20T15:22:39,193 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T15:22:39,197 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T15:22:39,200 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.3490 sec 2024-11-20T15:22:39,722 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T15:22:39,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-11-20T15:22:39,957 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 101 completed 2024-11-20T15:22:39,958 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T15:22:39,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=105, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:39,960 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=105, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:39,960 DEBUG [PEWorker-1 {}] 
procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=105, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:39,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=105 2024-11-20T15:22:39,962 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:39,964 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/recovered.edits] 2024-11-20T15:22:39,967 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/10fa7ce5405344bfa41616de7feda1f2 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/10fa7ce5405344bfa41616de7feda1f2 2024-11-20T15:22:39,969 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/4a4acba8e86145ee960fcb92e329b095 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/A/4a4acba8e86145ee960fcb92e329b095 2024-11-20T15:22:39,971 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/6a85a17f95c741569451bdb7f75be3f1 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/6a85a17f95c741569451bdb7f75be3f1 2024-11-20T15:22:39,972 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/7fb82f85d3cf49f3bb25b3f457573fcb to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/B/7fb82f85d3cf49f3bb25b3f457573fcb 2024-11-20T15:22:39,975 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/32de1bd5858243e59305a7703d06fdbc to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/32de1bd5858243e59305a7703d06fdbc 2024-11-20T15:22:39,976 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/b77f8b8cba0c4b918b9c4c4296409715 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/C/b77f8b8cba0c4b918b9c4c4296409715 2024-11-20T15:22:39,979 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/recovered.edits/540.seqid to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf/recovered.edits/540.seqid 2024-11-20T15:22:39,980 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/6e158976a217b84d83e0feeee3ad6faf 2024-11-20T15:22:39,980 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T15:22:39,982 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=105, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:39,989 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T15:22:39,993 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T15:22:39,994 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=105, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:39,994 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T15:22:39,995 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732116159994"}]},"ts":"9223372036854775807"} 2024-11-20T15:22:39,998 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T15:22:39,998 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 6e158976a217b84d83e0feeee3ad6faf, NAME => 'TestAcidGuarantees,,1732116135584.6e158976a217b84d83e0feeee3ad6faf.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T15:22:39,998 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-20T15:22:39,998 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732116159998"}]},"ts":"9223372036854775807"} 2024-11-20T15:22:40,001 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T15:22:40,004 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=105, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:40,005 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 46 msec 2024-11-20T15:22:40,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=105 2024-11-20T15:22:40,061 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 105 completed 2024-11-20T15:22:40,072 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=239 (was 241), OpenFileDescriptor=451 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=565 (was 401) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5771 (was 6875) 2024-11-20T15:22:40,081 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=239, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=565, ProcessCount=11, AvailableMemoryMB=5771 2024-11-20T15:22:40,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
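[Editor's note] The records from 15:22:38,9xx through 15:22:40,061 trace the client first disabling TestAcidGuarantees (DisableTableProcedure, pid=101) and then deleting it (DeleteTableProcedure, pid=105: regions archived, META rows removed, descriptor dropped). A hedged sketch of how a client typically drives that sequence with the HBase Admin API; the configuration setup here is a placeholder and is not taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                if (admin.tableExists(table)) {
                    admin.disableTable(table);  // corresponds to the DisableTableProcedure seen above
                    admin.deleteTable(table);   // corresponds to the DeleteTableProcedure (regions archived, META cleaned)
                }
            }
        }
    }
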
2024-11-20T15:22:40,083 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T15:22:40,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:40,085 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=106, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T15:22:40,085 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:40,085 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 106 2024-11-20T15:22:40,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T15:22:40,086 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=106, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T15:22:40,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742213_1389 (size=960) 2024-11-20T15:22:40,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T15:22:40,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T15:22:40,496 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3 2024-11-20T15:22:40,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742214_1390 (size=53) 2024-11-20T15:22:40,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T15:22:40,923 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:22:40,923 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 3c7be1c0ab4594b9826cba013e95ae89, disabling compactions & flushes 2024-11-20T15:22:40,923 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:40,923 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:40,923 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. after waiting 0 ms 2024-11-20T15:22:40,923 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:40,923 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
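[Editor's note] The create request logged at 15:22:40,083 spells out the descriptor for the new table: in-memory compaction type BASIC at the table level, plus three column families A, B and C with VERSIONS => '1' and otherwise default settings. A rough equivalent using the HBase 2.x client builders, as a sketch under those assumptions rather than the test's own code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        static void create(Admin admin) throws java.io.IOException {
            TableDescriptorBuilder builder = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // table-level metadata seen in the create record above
                .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
            for (String family : new String[] {"A", "B", "C"}) {
                builder.setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1)     // VERSIONS => '1'
                        .setBlocksize(65536)   // BLOCKSIZE => '65536'
                        .build());
            }
            // Submits a CreateTableProcedure on the master, as with pid=106 in the log
            admin.createTable(builder.build());
        }
    }
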
2024-11-20T15:22:40,923 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:40,925 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=106, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T15:22:40,926 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732116160925"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732116160925"}]},"ts":"1732116160925"} 2024-11-20T15:22:40,927 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T15:22:40,928 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=106, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T15:22:40,928 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116160928"}]},"ts":"1732116160928"} 2024-11-20T15:22:40,929 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T15:22:40,933 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c7be1c0ab4594b9826cba013e95ae89, ASSIGN}] 2024-11-20T15:22:40,934 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=107, ppid=106, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c7be1c0ab4594b9826cba013e95ae89, ASSIGN 2024-11-20T15:22:40,935 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=107, ppid=106, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c7be1c0ab4594b9826cba013e95ae89, ASSIGN; state=OFFLINE, location=0b62285ead89,33387,1732116069954; forceNewPlan=false, retain=false 2024-11-20T15:22:41,086 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=107 updating hbase:meta row=3c7be1c0ab4594b9826cba013e95ae89, regionState=OPENING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:22:41,087 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE; OpenRegionProcedure 3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:22:41,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T15:22:41,239 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:41,242 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:22:41,242 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7285): Opening region: {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} 2024-11-20T15:22:41,242 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:41,242 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:22:41,243 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7327): checking encryption for 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:41,243 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7330): checking classloading for 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:41,244 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:41,245 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:22:41,245 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c7be1c0ab4594b9826cba013e95ae89 columnFamilyName A 2024-11-20T15:22:41,245 DEBUG [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:41,246 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.HStore(327): Store=3c7be1c0ab4594b9826cba013e95ae89/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:22:41,246 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:41,247 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:22:41,247 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c7be1c0ab4594b9826cba013e95ae89 columnFamilyName B 2024-11-20T15:22:41,247 DEBUG [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:41,248 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.HStore(327): Store=3c7be1c0ab4594b9826cba013e95ae89/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:22:41,248 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:41,249 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:22:41,249 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c7be1c0ab4594b9826cba013e95ae89 columnFamilyName C 2024-11-20T15:22:41,249 DEBUG [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:41,250 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.HStore(327): Store=3c7be1c0ab4594b9826cba013e95ae89/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:22:41,250 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:41,251 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:41,251 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:41,253 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T15:22:41,254 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1085): writing seq id for 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:41,255 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T15:22:41,256 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1102): Opened 3c7be1c0ab4594b9826cba013e95ae89; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74646399, jitterRate=0.1123180240392685}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T15:22:41,257 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1001): Region open journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:41,258 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., pid=108, masterSystemTime=1732116161239 2024-11-20T15:22:41,260 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=107 updating hbase:meta row=3c7be1c0ab4594b9826cba013e95ae89, regionState=OPEN, openSeqNum=2, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:22:41,260 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:41,261 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:22:41,262 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=108, resume processing ppid=107 2024-11-20T15:22:41,262 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, ppid=107, state=SUCCESS; OpenRegionProcedure 3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 in 174 msec 2024-11-20T15:22:41,264 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-20T15:22:41,264 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c7be1c0ab4594b9826cba013e95ae89, ASSIGN in 329 msec 2024-11-20T15:22:41,264 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=106, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T15:22:41,264 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116161264"}]},"ts":"1732116161264"} 2024-11-20T15:22:41,265 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T15:22:41,268 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=106, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T15:22:41,269 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1850 sec 2024-11-20T15:22:42,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T15:22:42,190 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-20T15:22:42,192 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0bf5e2f0 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b82ba2a 2024-11-20T15:22:42,196 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3637e4c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:42,198 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:42,199 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41840, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:42,201 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T15:22:42,202 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55616, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T15:22:42,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] util.TableDescriptorChecker(321): 
MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T15:22:42,204 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T15:22:42,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=109, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T15:22:42,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742215_1391 (size=996) 2024-11-20T15:22:42,237 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-20T15:22:42,237 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-20T15:22:42,239 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=110, 
ppid=109, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T15:22:42,242 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c7be1c0ab4594b9826cba013e95ae89, REOPEN/MOVE}] 2024-11-20T15:22:42,242 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c7be1c0ab4594b9826cba013e95ae89, REOPEN/MOVE 2024-11-20T15:22:42,243 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=3c7be1c0ab4594b9826cba013e95ae89, regionState=CLOSING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:22:42,244 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T15:22:42,244 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=112, ppid=111, state=RUNNABLE; CloseRegionProcedure 3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:22:42,396 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:42,396 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(124): Close 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:42,396 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T15:22:42,397 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1681): Closing 3c7be1c0ab4594b9826cba013e95ae89, disabling compactions & flushes 2024-11-20T15:22:42,397 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:42,397 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:42,397 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. after waiting 0 ms 2024-11-20T15:22:42,397 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:22:42,404 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T15:22:42,405 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:42,405 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1635): Region close journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:42,405 WARN [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegionServer(3786): Not adding moved region record: 3c7be1c0ab4594b9826cba013e95ae89 to self. 2024-11-20T15:22:42,409 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(170): Closed 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:42,409 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=3c7be1c0ab4594b9826cba013e95ae89, regionState=CLOSED 2024-11-20T15:22:42,414 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=112, resume processing ppid=111 2024-11-20T15:22:42,414 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=111, state=SUCCESS; CloseRegionProcedure 3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 in 166 msec 2024-11-20T15:22:42,415 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c7be1c0ab4594b9826cba013e95ae89, REOPEN/MOVE; state=CLOSED, location=0b62285ead89,33387,1732116069954; forceNewPlan=false, retain=true 2024-11-20T15:22:42,567 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=3c7be1c0ab4594b9826cba013e95ae89, regionState=OPENING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:22:42,569 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=111, state=RUNNABLE; OpenRegionProcedure 3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:22:42,721 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:42,724 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:22:42,724 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7285): Opening region: {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} 2024-11-20T15:22:42,725 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:42,725 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:22:42,725 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7327): checking encryption for 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:42,725 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7330): checking classloading for 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:42,726 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:42,727 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:22:42,727 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c7be1c0ab4594b9826cba013e95ae89 columnFamilyName A 2024-11-20T15:22:42,733 DEBUG [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:42,733 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.HStore(327): Store=3c7be1c0ab4594b9826cba013e95ae89/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:22:42,734 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:42,735 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:22:42,735 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c7be1c0ab4594b9826cba013e95ae89 columnFamilyName B 2024-11-20T15:22:42,735 DEBUG [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:42,736 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.HStore(327): Store=3c7be1c0ab4594b9826cba013e95ae89/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:22:42,736 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:42,740 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:22:42,740 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c7be1c0ab4594b9826cba013e95ae89 columnFamilyName C 2024-11-20T15:22:42,740 DEBUG [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:42,740 INFO [StoreOpener-3c7be1c0ab4594b9826cba013e95ae89-1 {}] regionserver.HStore(327): Store=3c7be1c0ab4594b9826cba013e95ae89/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:22:42,741 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:42,741 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:42,742 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:42,744 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T15:22:42,745 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1085): writing seq id for 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:42,746 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1102): Opened 3c7be1c0ab4594b9826cba013e95ae89; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70040538, jitterRate=0.043685346841812134}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T15:22:42,747 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1001): Region open journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:42,747 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., pid=113, masterSystemTime=1732116162721 2024-11-20T15:22:42,749 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:42,749 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:22:42,749 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=3c7be1c0ab4594b9826cba013e95ae89, regionState=OPEN, openSeqNum=5, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:22:42,751 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=111 2024-11-20T15:22:42,751 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=111, state=SUCCESS; OpenRegionProcedure 3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 in 182 msec 2024-11-20T15:22:42,753 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-20T15:22:42,753 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c7be1c0ab4594b9826cba013e95ae89, REOPEN/MOVE in 509 msec 2024-11-20T15:22:42,755 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=110, resume processing ppid=109 2024-11-20T15:22:42,755 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, ppid=109, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 514 msec 2024-11-20T15:22:42,756 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 551 msec 2024-11-20T15:22:42,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=109 2024-11-20T15:22:42,759 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75b14fbd to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b6cf8cb 2024-11-20T15:22:42,780 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72f422b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:42,781 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62f74604 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ec15031 2024-11-20T15:22:42,797 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2df33cdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:42,798 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x49e13594 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3dd5b441 2024-11-20T15:22:42,804 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9f472e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:42,805 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c54a0d3 
to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c336ea4 2024-11-20T15:22:42,822 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@167a78b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:42,824 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3875c8c5 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f94d721 2024-11-20T15:22:42,828 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5aee939b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:42,829 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x319559be to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f49665c 2024-11-20T15:22:42,852 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2205f666, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:42,853 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c907e21 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683f8469 2024-11-20T15:22:42,865 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6584e9ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:42,866 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61ec0f48 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75e4d3d0 2024-11-20T15:22:42,885 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37ec8e3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:42,886 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7819b9e2 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b308f62 2024-11-20T15:22:42,891 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@787e5169, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:42,892 DEBUG [Time-limited 
test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x47679076 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68035c67 2024-11-20T15:22:42,895 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@627cad17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:22:42,898 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:42,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-20T15:22:42,900 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:42,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T15:22:42,900 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:42,900 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:42,902 DEBUG [hconnection-0x66ed6089-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:42,904 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55892, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:42,905 DEBUG [hconnection-0x78275636-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:42,905 DEBUG [hconnection-0x4e45fa7c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:42,906 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55894, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:42,906 DEBUG [hconnection-0x1ec0c5d2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:42,907 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55906, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:42,908 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55922, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:42,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] 
regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:42,915 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:22:42,916 DEBUG [hconnection-0x11e10028-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:42,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:22:42,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:42,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:22:42,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:42,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:22:42,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:42,918 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55926, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:42,918 DEBUG [hconnection-0x8d58b04-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:42,919 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55928, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:42,920 DEBUG [hconnection-0x6f0a9719-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:42,922 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55936, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:42,923 DEBUG [hconnection-0x73d6925-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:42,924 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55950, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:42,931 DEBUG [hconnection-0x541bb6a8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:42,932 DEBUG [hconnection-0x2f28602c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:22:42,933 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55952, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:42,934 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55966, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:22:42,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:42,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116222939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:42,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:42,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116222942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:42,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:42,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116222942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:42,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:42,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116222942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:42,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:42,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116222943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:42,952 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204f81f23af9df43828ff0b0c003b4009a_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116162914/Put/seqid=0 2024-11-20T15:22:42,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742216_1392 (size=12154) 2024-11-20T15:22:43,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T15:22:43,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:43,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116223043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:43,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116223043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:43,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116223044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:43,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116223044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:43,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116223045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,052 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,052 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:43,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:43,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:43,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:43,053 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T15:22:43,206 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:43,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:43,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:43,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:43,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:43,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116223246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:43,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116223246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:43,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116223246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:43,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116223246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:43,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116223246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,360 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:43,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:43,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:43,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:43,361 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,381 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:43,385 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204f81f23af9df43828ff0b0c003b4009a_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204f81f23af9df43828ff0b0c003b4009a_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:43,387 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/c8723bf1d09d4698a39f57d95fbc3ed5, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:43,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/c8723bf1d09d4698a39f57d95fbc3ed5 is 175, key is test_row_0/A:col10/1732116162914/Put/seqid=0 2024-11-20T15:22:43,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742217_1393 (size=30955) 2024-11-20T15:22:43,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T15:22:43,513 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,514 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:43,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:43,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:43,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:43,514 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:43,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:43,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116223548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:43,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116223549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:43,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116223549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:43,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116223550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:43,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116223552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,670 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,671 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:43,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:43,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:43,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:43,671 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,817 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/c8723bf1d09d4698a39f57d95fbc3ed5 2024-11-20T15:22:43,823 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,824 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:43,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:43,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:43,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:22:43,825 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/ffccafe111234a9c8778021c9a9fe137 is 50, key is test_row_0/B:col10/1732116162914/Put/seqid=0 2024-11-20T15:22:43,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742218_1394 (size=12001) 2024-11-20T15:22:43,890 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/ffccafe111234a9c8778021c9a9fe137 2024-11-20T15:22:43,956 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/223eeef8a46d4caf8567879c7c344aa7 is 50, key is test_row_0/C:col10/1732116162914/Put/seqid=0 2024-11-20T15:22:43,978 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:43,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:43,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:22:43,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:43,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:43,979 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:43,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:43,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:43,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742219_1395 (size=12001) 2024-11-20T15:22:43,997 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/223eeef8a46d4caf8567879c7c344aa7 2024-11-20T15:22:44,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/c8723bf1d09d4698a39f57d95fbc3ed5 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c8723bf1d09d4698a39f57d95fbc3ed5 2024-11-20T15:22:44,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T15:22:44,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c8723bf1d09d4698a39f57d95fbc3ed5, entries=150, sequenceid=17, filesize=30.2 K 2024-11-20T15:22:44,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/ffccafe111234a9c8778021c9a9fe137 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/ffccafe111234a9c8778021c9a9fe137 2024-11-20T15:22:44,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/ffccafe111234a9c8778021c9a9fe137, entries=150, sequenceid=17, filesize=11.7 K 2024-11-20T15:22:44,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/223eeef8a46d4caf8567879c7c344aa7 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/223eeef8a46d4caf8567879c7c344aa7 2024-11-20T15:22:44,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/223eeef8a46d4caf8567879c7c344aa7, entries=150, sequenceid=17, filesize=11.7 K 2024-11-20T15:22:44,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 3c7be1c0ab4594b9826cba013e95ae89 in 1116ms, sequenceid=17, compaction requested=false 2024-11-20T15:22:44,031 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 
2024-11-20T15:22:44,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:44,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:44,061 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T15:22:44,062 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:22:44,062 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:44,062 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:22:44,062 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:44,062 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:22:44,062 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:44,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120608c323bb9b54e3fbc0d6ce7f15f632f_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116164060/Put/seqid=0 2024-11-20T15:22:44,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116224069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116224076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116224077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116224081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116224082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742220_1396 (size=12154) 2024-11-20T15:22:44,131 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:44,132 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,133 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:44,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:44,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:44,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:22:44,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:44,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:44,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:44,136 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120608c323bb9b54e3fbc0d6ce7f15f632f_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120608c323bb9b54e3fbc0d6ce7f15f632f_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:44,137 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/741d399152ee4f64908924faae14e2b0, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:44,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/741d399152ee4f64908924faae14e2b0 is 175, key is test_row_0/A:col10/1732116164060/Put/seqid=0 2024-11-20T15:22:44,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742221_1397 (size=30955) 2024-11-20T15:22:44,148 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=44, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/741d399152ee4f64908924faae14e2b0 2024-11-20T15:22:44,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/00022e7fc4a94c4db59bf7d19e5a8c63 is 50, key is test_row_0/B:col10/1732116164060/Put/seqid=0 2024-11-20T15:22:44,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116224187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116224187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116224188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116224188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116224190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742222_1398 (size=12001) 2024-11-20T15:22:44,285 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,286 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:44,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:44,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:44,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:44,286 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:44,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:44,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:44,394 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116224391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116224391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116224393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116224393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,407 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116224401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,438 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:44,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:44,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:44,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:44,439 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:44,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:44,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:44,591 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:44,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:44,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:44,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:44,592 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:44,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:44,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:44,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/00022e7fc4a94c4db59bf7d19e5a8c63 2024-11-20T15:22:44,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/bea896cc5d0a458c92be00710e859795 is 50, key is test_row_0/C:col10/1732116164060/Put/seqid=0 2024-11-20T15:22:44,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742223_1399 (size=12001) 2024-11-20T15:22:44,665 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/bea896cc5d0a458c92be00710e859795 2024-11-20T15:22:44,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/741d399152ee4f64908924faae14e2b0 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/741d399152ee4f64908924faae14e2b0 2024-11-20T15:22:44,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/741d399152ee4f64908924faae14e2b0, entries=150, sequenceid=44, filesize=30.2 K 2024-11-20T15:22:44,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/00022e7fc4a94c4db59bf7d19e5a8c63 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/00022e7fc4a94c4db59bf7d19e5a8c63 2024-11-20T15:22:44,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/00022e7fc4a94c4db59bf7d19e5a8c63, entries=150, sequenceid=44, filesize=11.7 K 2024-11-20T15:22:44,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/bea896cc5d0a458c92be00710e859795 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/bea896cc5d0a458c92be00710e859795 2024-11-20T15:22:44,694 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/bea896cc5d0a458c92be00710e859795, entries=150, sequenceid=44, filesize=11.7 K 2024-11-20T15:22:44,695 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 3c7be1c0ab4594b9826cba013e95ae89 in 634ms, sequenceid=44, compaction requested=false 2024-11-20T15:22:44,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:44,702 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:22:44,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:44,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:22:44,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:44,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:22:44,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:44,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:22:44,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:44,718 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f97073ae1086402a9d08a55f0aad0bc8_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116164075/Put/seqid=0 2024-11-20T15:22:44,731 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T15:22:44,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742224_1400 (size=17034) 2024-11-20T15:22:44,745 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,746 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:44,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:44,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
as already flushing 2024-11-20T15:22:44,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:44,746 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:44,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:44,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:44,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116224744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116224747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116224750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116224751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116224754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116224856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,863 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116224859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116224861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116224861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:44,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116224863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,899 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:44,900 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:44,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:44,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:44,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:44,901 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:44,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:44,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:45,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T15:22:45,055 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:45,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:45,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:45,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:45,058 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:45,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:45,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:45,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:45,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116225064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:45,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116225066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:45,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116225067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:45,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116225067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:45,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116225068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,145 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:45,154 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f97073ae1086402a9d08a55f0aad0bc8_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f97073ae1086402a9d08a55f0aad0bc8_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:45,156 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/90d777cae7f443d59ba5feae3bdc068d, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:45,156 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/90d777cae7f443d59ba5feae3bdc068d is 175, key is test_row_0/A:col10/1732116164075/Put/seqid=0 2024-11-20T15:22:45,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742225_1401 (size=48139) 2024-11-20T15:22:45,207 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=56, memsize=20.1 
K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/90d777cae7f443d59ba5feae3bdc068d 2024-11-20T15:22:45,214 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:45,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:45,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:45,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:45,216 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:45,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:45,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
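[Annotation] The repeated RegionTooBusyException entries above come from HRegion.checkResources(), which rejects writes once the region's memstore passes its blocking limit (the memstore flush size multiplied by the block multiplier; this test run is evidently configured with a very small limit, hence the 512.0 K threshold in every message). The exception is retriable, and the stock HBase client already retries it internally before surfacing a failure; the sketch below only illustrates what an explicit application-level retry around a put might look like. The class name, retry counts, backoff values, and the row/family/qualifier literals are assumptions for illustration; only the HBase client API calls themselves are real.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {

      // Retry a single put a bounded number of times when the region reports it is too busy.
      // Note: the HBase client normally performs its own retries; this is purely illustrative.
      static void putWithRetry(Table table, Put put, int maxAttempts, long backoffMs)
          throws IOException, InterruptedException {
        for (int attempt = 1; ; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            // The memstore is over its blocking limit; back off so the flush can catch up.
            if (attempt >= maxAttempts) {
              throw e;
            }
            Thread.sleep(backoffMs * attempt); // simple linear backoff
          }
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          putWithRetry(table, put, 5, 200L);
        }
      }
    }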
2024-11-20T15:22:45,230 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/ccc7875f0b744651a44d0c049f39e137 is 50, key is test_row_0/B:col10/1732116164075/Put/seqid=0 2024-11-20T15:22:45,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742226_1402 (size=12001) 2024-11-20T15:22:45,286 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/ccc7875f0b744651a44d0c049f39e137 2024-11-20T15:22:45,315 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/b7ef62641f26496f98a05027cc4b5eaa is 50, key is test_row_0/C:col10/1732116164075/Put/seqid=0 2024-11-20T15:22:45,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742227_1403 (size=12001) 2024-11-20T15:22:45,344 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/b7ef62641f26496f98a05027cc4b5eaa 2024-11-20T15:22:45,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/90d777cae7f443d59ba5feae3bdc068d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/90d777cae7f443d59ba5feae3bdc068d 2024-11-20T15:22:45,359 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/90d777cae7f443d59ba5feae3bdc068d, entries=250, sequenceid=56, filesize=47.0 K 2024-11-20T15:22:45,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/ccc7875f0b744651a44d0c049f39e137 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/ccc7875f0b744651a44d0c049f39e137 2024-11-20T15:22:45,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/ccc7875f0b744651a44d0c049f39e137, entries=150, sequenceid=56, filesize=11.7 K 2024-11-20T15:22:45,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/b7ef62641f26496f98a05027cc4b5eaa as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b7ef62641f26496f98a05027cc4b5eaa 2024-11-20T15:22:45,368 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:45,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:45,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:45,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:45,369 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:45,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:45,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:45,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:45,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116225372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:45,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116225376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:45,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116225377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:45,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116225377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:45,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116225378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,384 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b7ef62641f26496f98a05027cc4b5eaa, entries=150, sequenceid=56, filesize=11.7 K 2024-11-20T15:22:45,385 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 3c7be1c0ab4594b9826cba013e95ae89 in 683ms, sequenceid=56, compaction requested=true 2024-11-20T15:22:45,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:45,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:45,386 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:45,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:45,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:45,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:45,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:45,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:45,386 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:45,387 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 110049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:45,387 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/A is initiating minor compaction (all files) 2024-11-20T15:22:45,387 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/A in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:45,387 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c8723bf1d09d4698a39f57d95fbc3ed5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/741d399152ee4f64908924faae14e2b0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/90d777cae7f443d59ba5feae3bdc068d] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=107.5 K 2024-11-20T15:22:45,387 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:45,387 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c8723bf1d09d4698a39f57d95fbc3ed5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/741d399152ee4f64908924faae14e2b0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/90d777cae7f443d59ba5feae3bdc068d] 2024-11-20T15:22:45,388 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8723bf1d09d4698a39f57d95fbc3ed5, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732116162913 2024-11-20T15:22:45,388 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:45,388 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/B is initiating minor compaction (all files) 2024-11-20T15:22:45,388 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/B in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
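[Annotation] The "Exploring compaction algorithm has selected 3 files ... 3 eligible, 16 blocking" lines above reflect the ratio-based selection used by the default compaction policy: a candidate set is acceptable only if every file in it is no larger than the configured ratio times the combined size of the other candidates, and a store starts blocking once it accumulates 16 store files (hbase.hstore.blockingStoreFiles). The listing below is a simplified stand-alone rendering of that ratio test, not the actual ExploringCompactionPolicy code; the class name, the 1.2 ratio value, and the individual file sizes are assumptions chosen to roughly match the 110049-byte A-family selection in the log.

    import java.util.Arrays;
    import java.util.List;

    // Simplified sketch of the "files in ratio" test applied when a candidate set of
    // store files is evaluated for a minor compaction.
    public class FilesInRatioSketch {

      // Every file must be <= ratio * (sum of the sizes of the other files in the set).
      static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > ratio * (total - size)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Sizes loosely modeled on the A-family selection above (three files, 110049 bytes total).
        List<Long> candidate = Arrays.asList(30955L, 30955L, 48139L);
        double ratio = 1.2; // assumed default for hbase.hstore.compaction.ratio
        System.out.println("candidate passes ratio test: " + filesInRatio(candidate, ratio));
      }
    }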
2024-11-20T15:22:45,388 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/ffccafe111234a9c8778021c9a9fe137, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/00022e7fc4a94c4db59bf7d19e5a8c63, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/ccc7875f0b744651a44d0c049f39e137] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=35.2 K 2024-11-20T15:22:45,388 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 741d399152ee4f64908924faae14e2b0, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732116162940 2024-11-20T15:22:45,389 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting ffccafe111234a9c8778021c9a9fe137, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732116162913 2024-11-20T15:22:45,389 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90d777cae7f443d59ba5feae3bdc068d, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732116164075 2024-11-20T15:22:45,389 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 00022e7fc4a94c4db59bf7d19e5a8c63, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732116162940 2024-11-20T15:22:45,389 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting ccc7875f0b744651a44d0c049f39e137, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732116164075 2024-11-20T15:22:45,403 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#B#compaction#351 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:45,404 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/29c098925c6d44ddba76604907fa55c0 is 50, key is test_row_0/B:col10/1732116164075/Put/seqid=0 2024-11-20T15:22:45,415 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:45,438 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120dc7face43edd42f297fae9cbbfdb4bf3_3c7be1c0ab4594b9826cba013e95ae89 store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:45,440 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120dc7face43edd42f297fae9cbbfdb4bf3_3c7be1c0ab4594b9826cba013e95ae89, store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:45,441 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dc7face43edd42f297fae9cbbfdb4bf3_3c7be1c0ab4594b9826cba013e95ae89 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:45,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742228_1404 (size=12104) 2024-11-20T15:22:45,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742229_1405 (size=4469) 2024-11-20T15:22:45,499 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#A#compaction#352 average throughput is 0.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:45,499 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/5ba3e29a926c40b095401f36dd2d1436 is 175, key is test_row_0/A:col10/1732116164075/Put/seqid=0 2024-11-20T15:22:45,524 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,524 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T15:22:45,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:45,525 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T15:22:45,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:22:45,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:45,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:22:45,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:45,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:22:45,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:45,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742230_1406 (size=31058) 2024-11-20T15:22:45,555 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/5ba3e29a926c40b095401f36dd2d1436 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/5ba3e29a926c40b095401f36dd2d1436 2024-11-20T15:22:45,562 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/A of 3c7be1c0ab4594b9826cba013e95ae89 into 
5ba3e29a926c40b095401f36dd2d1436(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:45,562 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:45,562 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/A, priority=13, startTime=1732116165386; duration=0sec 2024-11-20T15:22:45,562 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:45,562 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:A 2024-11-20T15:22:45,562 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:45,564 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:45,564 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/C is initiating minor compaction (all files) 2024-11-20T15:22:45,564 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/C in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
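[Annotation] The pid=115 FlushRegionCallable entries show the master repeatedly dispatching a flush procedure that the region server declines ("NOT flushing ... as already flushing") because a memstore flush of 3c7be1c0ab4594b9826cba013e95ae89 is still in progress; once that flush and the A/B/C compactions finish, the procedure can be retried successfully. The same flush and compaction requests can also be driven from application code through the Admin API, as in the hedged sketch below; the table name is taken from the log, everything else (class name, connection setup) is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Illustrative use of the Admin API to request the operations the log shows being
    // triggered internally: a memstore flush and a minor compaction of the store files.
    public class FlushAndCompactSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.flush(table);   // ask every region of the table to flush its memstore
          admin.compact(table); // queue a minor compaction of the flushed store files
        }
      }
    }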
2024-11-20T15:22:45,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202af79b6e1eac4c5ea7e446540d066aad_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116164753/Put/seqid=0 2024-11-20T15:22:45,564 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/223eeef8a46d4caf8567879c7c344aa7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/bea896cc5d0a458c92be00710e859795, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b7ef62641f26496f98a05027cc4b5eaa] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=35.2 K 2024-11-20T15:22:45,565 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 223eeef8a46d4caf8567879c7c344aa7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732116162913 2024-11-20T15:22:45,565 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting bea896cc5d0a458c92be00710e859795, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732116162940 2024-11-20T15:22:45,565 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7ef62641f26496f98a05027cc4b5eaa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732116164075 2024-11-20T15:22:45,591 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#C#compaction#354 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:45,592 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/d802e3268aee4bbb8b14819c7a0ffb79 is 50, key is test_row_0/C:col10/1732116164075/Put/seqid=0 2024-11-20T15:22:45,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742231_1407 (size=12154) 2024-11-20T15:22:45,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742232_1408 (size=12104) 2024-11-20T15:22:45,871 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/29c098925c6d44ddba76604907fa55c0 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/29c098925c6d44ddba76604907fa55c0 2024-11-20T15:22:45,879 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/B of 3c7be1c0ab4594b9826cba013e95ae89 into 29c098925c6d44ddba76604907fa55c0(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:45,879 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:45,879 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/B, priority=13, startTime=1732116165386; duration=0sec 2024-11-20T15:22:45,879 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:45,879 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:B 2024-11-20T15:22:45,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:45,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:45,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:45,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116225894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:45,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116225904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:45,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116225904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:45,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116225905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:45,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:45,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116225906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:46,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:46,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116226009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:46,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:46,019 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202af79b6e1eac4c5ea7e446540d066aad_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202af79b6e1eac4c5ea7e446540d066aad_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:46,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/0c43f708a3244a26afb00de05a7971fa, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:46,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/0c43f708a3244a26afb00de05a7971fa is 175, key is test_row_0/A:col10/1732116164753/Put/seqid=0 2024-11-20T15:22:46,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:46,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116226015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:46,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:46,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116226016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:46,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:46,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116226016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:46,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:46,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116226020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:46,035 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/d802e3268aee4bbb8b14819c7a0ffb79 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/d802e3268aee4bbb8b14819c7a0ffb79 2024-11-20T15:22:46,040 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/C of 3c7be1c0ab4594b9826cba013e95ae89 into d802e3268aee4bbb8b14819c7a0ffb79(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:46,040 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:46,040 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/C, priority=13, startTime=1732116165386; duration=0sec 2024-11-20T15:22:46,040 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:46,040 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:C 2024-11-20T15:22:46,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742233_1409 (size=30955) 2024-11-20T15:22:46,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:46,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116226221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:46,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:46,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116226224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:46,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:46,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116226225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:46,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:46,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116226225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:46,233 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:46,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116226227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:46,454 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=80, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/0c43f708a3244a26afb00de05a7971fa 2024-11-20T15:22:46,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/43675b55b77e43b0939af50b6612bf6f is 50, key is test_row_0/B:col10/1732116164753/Put/seqid=0 2024-11-20T15:22:46,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742234_1410 (size=12001) 2024-11-20T15:22:46,505 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/43675b55b77e43b0939af50b6612bf6f 2024-11-20T15:22:46,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:46,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116226526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:46,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:46,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116226529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:46,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/81ce6b4068ba4c318b8d06441a7d001b is 50, key is test_row_0/C:col10/1732116164753/Put/seqid=0 2024-11-20T15:22:46,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116226533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:46,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116226535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:46,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:46,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116226535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:46,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742235_1411 (size=12001) 2024-11-20T15:22:46,975 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/81ce6b4068ba4c318b8d06441a7d001b 2024-11-20T15:22:46,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/0c43f708a3244a26afb00de05a7971fa as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/0c43f708a3244a26afb00de05a7971fa 2024-11-20T15:22:46,995 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/0c43f708a3244a26afb00de05a7971fa, entries=150, sequenceid=80, filesize=30.2 K 2024-11-20T15:22:46,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/43675b55b77e43b0939af50b6612bf6f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/43675b55b77e43b0939af50b6612bf6f 2024-11-20T15:22:47,003 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/43675b55b77e43b0939af50b6612bf6f, entries=150, sequenceid=80, filesize=11.7 K 2024-11-20T15:22:47,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/81ce6b4068ba4c318b8d06441a7d001b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/81ce6b4068ba4c318b8d06441a7d001b 2024-11-20T15:22:47,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T15:22:47,014 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/81ce6b4068ba4c318b8d06441a7d001b, entries=150, sequenceid=80, filesize=11.7 K 2024-11-20T15:22:47,015 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 3c7be1c0ab4594b9826cba013e95ae89 in 1491ms, sequenceid=80, compaction requested=false 2024-11-20T15:22:47,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:47,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
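The stretch of log above is dominated by two things: writes to region 3c7be1c0ab4594b9826cba013e95ae89 being rejected by HRegion.checkResources with RegionTooBusyException because the region's memstore is over its 512.0 K blocking limit, and the flush (pid=115, plus the MOB flush for family A) that drains that memstore. The blocking limit is the product of two standard HBase settings, hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier. The snippet below is a minimal sketch of that relationship only; the class name is hypothetical, the defaults shown are the usual production values (roughly 128 MB flush size and a block multiplier of 4), and the much smaller values this test actually configured are not visible in the log beyond the resulting 512 K limit.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: how the "Over memstore limit=512.0 K" threshold is derived.
// A region rejects new mutations with RegionTooBusyException once its memstore
// exceeds flushSize * blockMultiplier, until a flush brings it back under that limit.
public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size",
        128L * 1024 * 1024);                                   // usual default, ~128 MB
    long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    long blockingLimit = flushSize * blockMultiplier;          // 512 K in this test run
    System.out.println("Writes to a region are rejected (RegionTooBusyException) once its "
        + "memstore exceeds " + blockingLimit + " bytes");
  }
}
```

For the flush side, a plausible client-side counterpart, again only a sketch, is an explicit flush of the test table through the Admin API, the kind of request that this build's master runs as the FlushTableProcedure / FlushRegionProcedure pair (pid=114 / pid=115) seen finishing below. Whether TestAcidGuarantees drives its flushes this way is an assumption; the class name is hypothetical and the connection is assumed to pick up the hbase-site.xml on the classpath.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch only: ask the cluster to flush every region of the test table.
public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Each region writes its memstores (families A, B and C here, with family A's
      // values going through the MOB path) out as new store files.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

Once the flush completes ("Finished flush of dataSize ~140.89 KB" above), the memstore can drain; in this run the region refills quickly, another flush is requested at 15:22:47,043, and the RegionTooBusyException rejections resume while it runs.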
2024-11-20T15:22:47,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-20T15:22:47,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-20T15:22:47,018 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-20T15:22:47,018 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.1160 sec 2024-11-20T15:22:47,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 4.1200 sec 2024-11-20T15:22:47,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:47,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T15:22:47,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:22:47,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:47,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:22:47,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:47,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:22:47,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:47,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b0c1b4a28df84bb8957f25a7bb34e410_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116165902/Put/seqid=0 2024-11-20T15:22:47,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116227080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116227081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116227084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116227088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116227090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742236_1412 (size=12154) 2024-11-20T15:22:47,107 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:47,112 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b0c1b4a28df84bb8957f25a7bb34e410_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b0c1b4a28df84bb8957f25a7bb34e410_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:47,117 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/cc7ba80a95f24d6f91736e61d6e3e867, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:47,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/cc7ba80a95f24d6f91736e61d6e3e867 is 175, key is test_row_0/A:col10/1732116165902/Put/seqid=0 2024-11-20T15:22:47,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is 
added to blk_1073742237_1413 (size=30955) 2024-11-20T15:22:47,165 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=98, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/cc7ba80a95f24d6f91736e61d6e3e867 2024-11-20T15:22:47,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/c1a4d50b1aeb443f8af82a9420a886e1 is 50, key is test_row_0/B:col10/1732116165902/Put/seqid=0 2024-11-20T15:22:47,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116227191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116227192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116227193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116227200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116227200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742238_1414 (size=12001) 2024-11-20T15:22:47,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116227399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116227399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116227399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116227408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116227409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=98 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/c1a4d50b1aeb443f8af82a9420a886e1 2024-11-20T15:22:47,668 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/16029e80807b4772a7177353ce88a38b is 50, key is test_row_0/C:col10/1732116165902/Put/seqid=0 2024-11-20T15:22:47,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742239_1415 (size=12001) 2024-11-20T15:22:47,710 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=98 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/16029e80807b4772a7177353ce88a38b 2024-11-20T15:22:47,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116227708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116227708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116227708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/cc7ba80a95f24d6f91736e61d6e3e867 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/cc7ba80a95f24d6f91736e61d6e3e867 2024-11-20T15:22:47,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116227716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:47,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116227716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:47,729 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/cc7ba80a95f24d6f91736e61d6e3e867, entries=150, sequenceid=98, filesize=30.2 K 2024-11-20T15:22:47,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/c1a4d50b1aeb443f8af82a9420a886e1 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c1a4d50b1aeb443f8af82a9420a886e1 2024-11-20T15:22:47,737 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c1a4d50b1aeb443f8af82a9420a886e1, entries=150, sequenceid=98, filesize=11.7 K 2024-11-20T15:22:47,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/16029e80807b4772a7177353ce88a38b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/16029e80807b4772a7177353ce88a38b 2024-11-20T15:22:47,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/16029e80807b4772a7177353ce88a38b, entries=150, sequenceid=98, filesize=11.7 K 2024-11-20T15:22:47,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 3c7be1c0ab4594b9826cba013e95ae89 in 707ms, sequenceid=98, compaction requested=true 2024-11-20T15:22:47,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:47,750 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:47,753 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:47,753 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:47,753 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/A is initiating minor compaction (all files) 2024-11-20T15:22:47,753 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/A in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:47,753 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/5ba3e29a926c40b095401f36dd2d1436, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/0c43f708a3244a26afb00de05a7971fa, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/cc7ba80a95f24d6f91736e61d6e3e867] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=90.8 K 2024-11-20T15:22:47,754 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:47,754 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/5ba3e29a926c40b095401f36dd2d1436, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/0c43f708a3244a26afb00de05a7971fa, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/cc7ba80a95f24d6f91736e61d6e3e867] 2024-11-20T15:22:47,754 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ba3e29a926c40b095401f36dd2d1436, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732116164075 2024-11-20T15:22:47,755 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c43f708a3244a26afb00de05a7971fa, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732116164744 2024-11-20T15:22:47,755 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc7ba80a95f24d6f91736e61d6e3e867, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1732116165902 2024-11-20T15:22:47,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:47,755 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:47,757 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:47,757 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/B is initiating minor compaction (all files) 2024-11-20T15:22:47,757 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/B in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:22:47,757 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/29c098925c6d44ddba76604907fa55c0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/43675b55b77e43b0939af50b6612bf6f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c1a4d50b1aeb443f8af82a9420a886e1] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=35.3 K 2024-11-20T15:22:47,759 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 29c098925c6d44ddba76604907fa55c0, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732116164075 2024-11-20T15:22:47,759 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 43675b55b77e43b0939af50b6612bf6f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732116164744 2024-11-20T15:22:47,759 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting c1a4d50b1aeb443f8af82a9420a886e1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1732116165902 2024-11-20T15:22:47,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:47,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:47,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:47,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:47,776 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:47,788 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#B#compaction#361 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:47,789 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/93a2198f36d7470eb32c245eaac29c96 is 50, key is test_row_0/B:col10/1732116165902/Put/seqid=0 2024-11-20T15:22:47,797 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411201f8664b18cec4e4b94979e7699b37e4c_3c7be1c0ab4594b9826cba013e95ae89 store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:47,798 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411201f8664b18cec4e4b94979e7699b37e4c_3c7be1c0ab4594b9826cba013e95ae89, store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:47,798 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201f8664b18cec4e4b94979e7699b37e4c_3c7be1c0ab4594b9826cba013e95ae89 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:47,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742240_1416 (size=12207) 2024-11-20T15:22:47,843 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/93a2198f36d7470eb32c245eaac29c96 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/93a2198f36d7470eb32c245eaac29c96 2024-11-20T15:22:47,849 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/B of 3c7be1c0ab4594b9826cba013e95ae89 into 93a2198f36d7470eb32c245eaac29c96(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
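Illustrative aside on the repeated RegionTooBusyException entries above: the server rejects Mutate calls while the region's memstore is over its blocking limit, and the flushes logged by MemStoreFlusher.0 bring it back under that limit. The HBase client normally retries this internally, and depending on client retry settings the busy signal may surface wrapped in a retries-exhausted exception rather than directly; the sketch below is only a minimal, hedged illustration of an explicit caller-side backoff using the standard HBase 2.x client API. The table name matches the test (TestAcidGuarantees); the column family "A", row key, retry count, and backoff values are assumptions for illustration, not taken from this run.

// Hedged sketch: explicit backoff when a put is rejected with RegionTooBusyException.
// Family, row, retry count and sleep values are illustrative assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                     // assumed starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                       // may fail while the region is blocked
          break;                                // write accepted
        } catch (RegionTooBusyException busy) {
          // Region memstore is above its blocking limit; wait for the flush
          // (like the MemStoreFlusher.0 entries in this log) and try again.
          Thread.sleep(backoffMs);
          backoffMs *= 2;                       // simple exponential backoff
        }
      }
    }
  }
}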
2024-11-20T15:22:47,850 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:47,850 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/B, priority=13, startTime=1732116167755; duration=0sec 2024-11-20T15:22:47,850 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:47,850 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:B 2024-11-20T15:22:47,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742241_1417 (size=4469) 2024-11-20T15:22:47,850 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:47,851 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:47,851 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/C is initiating minor compaction (all files) 2024-11-20T15:22:47,851 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/C in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:22:47,852 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/d802e3268aee4bbb8b14819c7a0ffb79, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/81ce6b4068ba4c318b8d06441a7d001b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/16029e80807b4772a7177353ce88a38b] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=35.3 K 2024-11-20T15:22:47,852 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting d802e3268aee4bbb8b14819c7a0ffb79, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732116164075 2024-11-20T15:22:47,853 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 81ce6b4068ba4c318b8d06441a7d001b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732116164744 2024-11-20T15:22:47,853 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#A#compaction#360 average throughput is 0.32 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:47,854 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 16029e80807b4772a7177353ce88a38b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1732116165902 2024-11-20T15:22:47,854 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/8365611f7a6e4147bb0baeaec77ad295 is 175, key is test_row_0/A:col10/1732116165902/Put/seqid=0 2024-11-20T15:22:47,867 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#C#compaction#362 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:47,867 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/1305cd59f41b4faba7c452733c6fdcff is 50, key is test_row_0/C:col10/1732116165902/Put/seqid=0 2024-11-20T15:22:47,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742242_1418 (size=31161) 2024-11-20T15:22:47,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742243_1419 (size=12207) 2024-11-20T15:22:48,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:48,223 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T15:22:48,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:22:48,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:48,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:22:48,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:48,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:22:48,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:48,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112082e5bee926b346f58ce5a8db6f5ac28c_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116167078/Put/seqid=0 2024-11-20T15:22:48,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742244_1420 (size=14594) 2024-11-20T15:22:48,241 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:48,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116228239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116228240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116228247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,255 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112082e5bee926b346f58ce5a8db6f5ac28c_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112082e5bee926b346f58ce5a8db6f5ac28c_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:48,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116228248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,257 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/291cc92b77a042008d70912448733b57, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:48,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116228249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/291cc92b77a042008d70912448733b57 is 175, key is test_row_0/A:col10/1732116167078/Put/seqid=0 2024-11-20T15:22:48,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742245_1421 (size=39549) 2024-11-20T15:22:48,269 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=121, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/291cc92b77a042008d70912448733b57 2024-11-20T15:22:48,281 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/2b07c6d5ca554cccac3f5cec9544da39 is 50, key is test_row_0/B:col10/1732116167078/Put/seqid=0 2024-11-20T15:22:48,298 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/8365611f7a6e4147bb0baeaec77ad295 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/8365611f7a6e4147bb0baeaec77ad295 2024-11-20T15:22:48,304 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/A of 3c7be1c0ab4594b9826cba013e95ae89 into 8365611f7a6e4147bb0baeaec77ad295(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:48,304 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:48,304 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/A, priority=13, startTime=1732116167750; duration=0sec 2024-11-20T15:22:48,304 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:48,304 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:A 2024-11-20T15:22:48,314 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/1305cd59f41b4faba7c452733c6fdcff as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/1305cd59f41b4faba7c452733c6fdcff 2024-11-20T15:22:48,318 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/C of 3c7be1c0ab4594b9826cba013e95ae89 into 1305cd59f41b4faba7c452733c6fdcff(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:48,318 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:48,318 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/C, priority=13, startTime=1732116167768; duration=0sec 2024-11-20T15:22:48,318 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:48,318 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:C 2024-11-20T15:22:48,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742246_1422 (size=12001) 2024-11-20T15:22:48,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116228350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116228353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116228356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116228358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116228359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116228556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116228558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116228564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116228565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116228565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,740 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/2b07c6d5ca554cccac3f5cec9544da39 2024-11-20T15:22:48,748 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/9cab56a1c19f43978eb8adc499d73586 is 50, key is test_row_0/C:col10/1732116167078/Put/seqid=0 2024-11-20T15:22:48,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742247_1423 (size=12001) 2024-11-20T15:22:48,758 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/9cab56a1c19f43978eb8adc499d73586 2024-11-20T15:22:48,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/291cc92b77a042008d70912448733b57 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/291cc92b77a042008d70912448733b57 2024-11-20T15:22:48,767 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/291cc92b77a042008d70912448733b57, entries=200, sequenceid=121, filesize=38.6 K 2024-11-20T15:22:48,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/2b07c6d5ca554cccac3f5cec9544da39 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2b07c6d5ca554cccac3f5cec9544da39 2024-11-20T15:22:48,771 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2b07c6d5ca554cccac3f5cec9544da39, entries=150, sequenceid=121, filesize=11.7 K 2024-11-20T15:22:48,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/9cab56a1c19f43978eb8adc499d73586 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/9cab56a1c19f43978eb8adc499d73586 2024-11-20T15:22:48,775 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/9cab56a1c19f43978eb8adc499d73586, entries=150, sequenceid=121, filesize=11.7 K 2024-11-20T15:22:48,776 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 3c7be1c0ab4594b9826cba013e95ae89 in 553ms, sequenceid=121, compaction requested=false 2024-11-20T15:22:48,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:48,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:48,866 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T15:22:48,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:22:48,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:48,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:22:48,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:48,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:22:48,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:48,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112099c00aea3d254410a0ffde7b6817afdf_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116168247/Put/seqid=0 2024-11-20T15:22:48,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116228900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116228902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116228901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742248_1424 (size=14794) 2024-11-20T15:22:48,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116228905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:48,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:48,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116228907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:49,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:49,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116229008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:49,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:49,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116229010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:49,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:49,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116229012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:49,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:49,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116229017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:49,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:49,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116229019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:49,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:49,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116229213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:49,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:49,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116229214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:49,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:49,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116229217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:49,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:49,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116229219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:49,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:49,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116229226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:49,310 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:49,315 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112099c00aea3d254410a0ffde7b6817afdf_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112099c00aea3d254410a0ffde7b6817afdf_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:49,316 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/0c9f59a0a0ad4e59a3e57346c6ec60dc, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:49,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/0c9f59a0a0ad4e59a3e57346c6ec60dc is 175, key is test_row_0/A:col10/1732116168247/Put/seqid=0 2024-11-20T15:22:49,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742249_1425 (size=39749) 2024-11-20T15:22:49,331 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=139, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/0c9f59a0a0ad4e59a3e57346c6ec60dc 2024-11-20T15:22:49,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/13196a89aa1f426db4c153b3df1f78bc is 50, key is test_row_0/B:col10/1732116168247/Put/seqid=0 2024-11-20T15:22:49,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742250_1426 
(size=12151) 2024-11-20T15:22:49,365 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/13196a89aa1f426db4c153b3df1f78bc 2024-11-20T15:22:49,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/8db99bf7b7624a4ebf4c06813b7413c8 is 50, key is test_row_0/C:col10/1732116168247/Put/seqid=0 2024-11-20T15:22:49,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742251_1427 (size=12151) 2024-11-20T15:22:49,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:49,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116229516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:49,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:49,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116229516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:49,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:49,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116229525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:49,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:49,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116229525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:49,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:49,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116229533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:49,722 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T15:22:49,722 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T15:22:49,810 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/8db99bf7b7624a4ebf4c06813b7413c8 2024-11-20T15:22:49,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/0c9f59a0a0ad4e59a3e57346c6ec60dc as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/0c9f59a0a0ad4e59a3e57346c6ec60dc 2024-11-20T15:22:49,817 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/0c9f59a0a0ad4e59a3e57346c6ec60dc, entries=200, sequenceid=139, filesize=38.8 K 2024-11-20T15:22:49,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/13196a89aa1f426db4c153b3df1f78bc as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/13196a89aa1f426db4c153b3df1f78bc 2024-11-20T15:22:49,820 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/13196a89aa1f426db4c153b3df1f78bc, entries=150, sequenceid=139, filesize=11.9 K 2024-11-20T15:22:49,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/8db99bf7b7624a4ebf4c06813b7413c8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/8db99bf7b7624a4ebf4c06813b7413c8 2024-11-20T15:22:49,824 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/8db99bf7b7624a4ebf4c06813b7413c8, entries=150, sequenceid=139, filesize=11.9 K 2024-11-20T15:22:49,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 3c7be1c0ab4594b9826cba013e95ae89 in 959ms, sequenceid=139, compaction requested=true 2024-11-20T15:22:49,825 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:49,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:49,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:49,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:49,825 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:49,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:49,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:49,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:49,825 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:49,826 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:49,826 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/A is initiating minor compaction (all files) 2024-11-20T15:22:49,826 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/A in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
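The flush just logged (three store files committed, then compaction requested for stores A, B and C) and the repeated RegionTooBusyException entries are governed by a small set of region-server settings. The blocking limit quoted in the exception message is the memstore flush size multiplied by the block multiplier, which is why writes are rejected until a flush like the one above completes. A minimal, illustrative sketch follows, using the stock HBase property names with their usual defaults rather than the much smaller flush size this test harness evidently configures (the blocking limit logged here is 512.0 K):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreCompactionKnobs {
    public static Configuration illustrate() {
        Configuration conf = HBaseConfiguration.create();

        // A memstore is flushed to an HFile once it reaches this size (stock default: 128 MB).
        // This test run clearly uses a far smaller value, hence the frequent flushes.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

        // Writes are rejected with RegionTooBusyException ("Over memstore limit=...") once the
        // memstore reaches flush.size * this multiplier (stock default: 4).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        // A store becomes eligible for minor compaction once it holds at least this many HFiles,
        // matching the "Selecting compaction from 3 store files" entries in this log.
        conf.setInt("hbase.hstore.compaction.min", 3);

        return conf;
    }
}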
2024-11-20T15:22:49,827 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/8365611f7a6e4147bb0baeaec77ad295, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/291cc92b77a042008d70912448733b57, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/0c9f59a0a0ad4e59a3e57346c6ec60dc] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=107.9 K
2024-11-20T15:22:49,827 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.
2024-11-20T15:22:49,827 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/8365611f7a6e4147bb0baeaec77ad295, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/291cc92b77a042008d70912448733b57, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/0c9f59a0a0ad4e59a3e57346c6ec60dc]
2024-11-20T15:22:49,827 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T15:22:49,827 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/B is initiating minor compaction (all files)
2024-11-20T15:22:49,827 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/B in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.
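The writer threads behind the Mutate calls in this log keep failing with RegionTooBusyException while the flush and the compactions above are in flight; the stock HBase client treats this exception as retryable and backs off on its own. Purely as an illustration, and not code taken from TestAcidGuarantees, a put with an explicit retry loop could look like the following sketch (the table name matches the log; the attempt limit and sleep interval are invented):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            // Illustrative retry loop: back off while the region reports
            // "Over memstore limit" and retry the same mutation.
            int attempts = 0;
            while (true) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException busy) {
                    if (++attempts >= 10) {
                        throw busy; // give up after a bounded number of attempts
                    }
                    Thread.sleep(200L * attempts); // simple linear backoff
                }
            }
        }
    }
}

In practice the client-side retry policy (hbase.client.retries.number and the pause settings) makes a hand-written loop like this unnecessary; it is shown only to make the back-off behind the repeated "Region is too busy" entries visible.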
2024-11-20T15:22:49,827 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/93a2198f36d7470eb32c245eaac29c96, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2b07c6d5ca554cccac3f5cec9544da39, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/13196a89aa1f426db4c153b3df1f78bc] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=35.5 K 2024-11-20T15:22:49,827 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8365611f7a6e4147bb0baeaec77ad295, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1732116165902 2024-11-20T15:22:49,827 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 93a2198f36d7470eb32c245eaac29c96, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1732116165902 2024-11-20T15:22:49,828 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 291cc92b77a042008d70912448733b57, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732116167078 2024-11-20T15:22:49,828 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b07c6d5ca554cccac3f5cec9544da39, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732116167078 2024-11-20T15:22:49,828 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c9f59a0a0ad4e59a3e57346c6ec60dc, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732116168238 2024-11-20T15:22:49,828 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 13196a89aa1f426db4c153b3df1f78bc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732116168247 2024-11-20T15:22:49,836 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#B#compaction#369 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:49,836 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/22892832d1884071a668b9bd1520220a is 50, key is test_row_0/B:col10/1732116168247/Put/seqid=0 2024-11-20T15:22:49,837 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:49,856 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120c9fe9628ba864ca4ab33b4a83d589a9b_3c7be1c0ab4594b9826cba013e95ae89 store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:49,858 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120c9fe9628ba864ca4ab33b4a83d589a9b_3c7be1c0ab4594b9826cba013e95ae89, store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:49,859 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c9fe9628ba864ca4ab33b4a83d589a9b_3c7be1c0ab4594b9826cba013e95ae89 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:49,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742252_1428 (size=12459) 2024-11-20T15:22:49,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742253_1429 (size=4469) 2024-11-20T15:22:50,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:50,025 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T15:22:50,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:22:50,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:50,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:22:50,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:50,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:22:50,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:50,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209683b72655ef400bbce929d71a663fb0_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116168902/Put/seqid=0 2024-11-20T15:22:50,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742254_1430 (size=14794) 2024-11-20T15:22:50,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116230045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116230046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116230047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116230047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116230048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116230149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116230153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116230154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116230154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116230155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,270 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/22892832d1884071a668b9bd1520220a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/22892832d1884071a668b9bd1520220a 2024-11-20T15:22:50,274 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/B of 3c7be1c0ab4594b9826cba013e95ae89 into 22892832d1884071a668b9bd1520220a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:50,274 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:50,274 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/B, priority=13, startTime=1732116169825; duration=0sec 2024-11-20T15:22:50,274 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:50,274 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:B 2024-11-20T15:22:50,274 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:50,280 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#A#compaction#370 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:50,280 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:50,280 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/C is initiating minor compaction (all files) 2024-11-20T15:22:50,280 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/C in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:50,281 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/1305cd59f41b4faba7c452733c6fdcff, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/9cab56a1c19f43978eb8adc499d73586, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/8db99bf7b7624a4ebf4c06813b7413c8] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=35.5 K 2024-11-20T15:22:50,281 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/762d89b668624e26ad52a465cd474fc3 is 175, key is test_row_0/A:col10/1732116168247/Put/seqid=0 2024-11-20T15:22:50,281 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 1305cd59f41b4faba7c452733c6fdcff, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1732116165902 2024-11-20T15:22:50,281 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cab56a1c19f43978eb8adc499d73586, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732116167078 2024-11-20T15:22:50,282 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 8db99bf7b7624a4ebf4c06813b7413c8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732116168247 2024-11-20T15:22:50,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742255_1431 (size=31413) 2024-11-20T15:22:50,289 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#C#compaction#372 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:50,289 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/1689f150a3324020af01337c8405d480 is 50, key is test_row_0/C:col10/1732116168247/Put/seqid=0 2024-11-20T15:22:50,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742256_1432 (size=12459) 2024-11-20T15:22:50,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116230352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116230357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116230357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116230358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116230360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,437 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:50,441 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209683b72655ef400bbce929d71a663fb0_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209683b72655ef400bbce929d71a663fb0_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:50,441 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/4094299df5154c2e8318006ac4d05c3c, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:50,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/4094299df5154c2e8318006ac4d05c3c is 175, key is test_row_0/A:col10/1732116168902/Put/seqid=0 2024-11-20T15:22:50,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742257_1433 (size=39749) 2024-11-20T15:22:50,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116230656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116230663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116230664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116230664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:50,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116230665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:50,691 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/762d89b668624e26ad52a465cd474fc3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/762d89b668624e26ad52a465cd474fc3 2024-11-20T15:22:50,695 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/A of 3c7be1c0ab4594b9826cba013e95ae89 into 762d89b668624e26ad52a465cd474fc3(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:50,695 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:50,695 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/A, priority=13, startTime=1732116169825; duration=0sec 2024-11-20T15:22:50,695 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:50,695 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:A 2024-11-20T15:22:50,701 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/1689f150a3324020af01337c8405d480 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/1689f150a3324020af01337c8405d480 2024-11-20T15:22:50,705 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/C of 3c7be1c0ab4594b9826cba013e95ae89 into 1689f150a3324020af01337c8405d480(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:50,705 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:50,705 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/C, priority=13, startTime=1732116169825; duration=0sec 2024-11-20T15:22:50,705 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:50,705 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:C 2024-11-20T15:22:50,846 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=160, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/4094299df5154c2e8318006ac4d05c3c 2024-11-20T15:22:50,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/5b82cdc84caf4f75b21d57da2e3e4e82 is 50, key is test_row_0/B:col10/1732116168902/Put/seqid=0 2024-11-20T15:22:50,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742258_1434 (size=12151) 
2024-11-20T15:22:51,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T15:22:51,006 INFO [Thread-1753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-20T15:22:51,007 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:51,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-20T15:22:51,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T15:22:51,009 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:51,009 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:51,009 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:51,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T15:22:51,161 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:51,161 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T15:22:51,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:51,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:51,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:51,162 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:51,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:51,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:51,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:51,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116231160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:51,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:51,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116231167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:51,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:51,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116231172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:51,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:51,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116231172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:51,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:51,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116231174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:51,258 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/5b82cdc84caf4f75b21d57da2e3e4e82 2024-11-20T15:22:51,270 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/53921b72a99349e19b6e92be10d13498 is 50, key is test_row_0/C:col10/1732116168902/Put/seqid=0 2024-11-20T15:22:51,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742259_1435 (size=12151) 2024-11-20T15:22:51,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T15:22:51,314 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:51,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T15:22:51,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:22:51,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:51,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:51,315 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:51,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:51,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:51,468 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:51,469 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T15:22:51,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:51,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
as already flushing 2024-11-20T15:22:51,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:51,469 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:51,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:51,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:51,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T15:22:51,621 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:51,621 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T15:22:51,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:51,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:51,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:51,622 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:51,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:51,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:51,695 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/53921b72a99349e19b6e92be10d13498 2024-11-20T15:22:51,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/4094299df5154c2e8318006ac4d05c3c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/4094299df5154c2e8318006ac4d05c3c 2024-11-20T15:22:51,703 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/4094299df5154c2e8318006ac4d05c3c, entries=200, sequenceid=160, filesize=38.8 K 2024-11-20T15:22:51,703 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-20T15:22:51,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/5b82cdc84caf4f75b21d57da2e3e4e82 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/5b82cdc84caf4f75b21d57da2e3e4e82 2024-11-20T15:22:51,708 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/5b82cdc84caf4f75b21d57da2e3e4e82, entries=150, sequenceid=160, filesize=11.9 K 2024-11-20T15:22:51,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/53921b72a99349e19b6e92be10d13498 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/53921b72a99349e19b6e92be10d13498 2024-11-20T15:22:51,712 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/53921b72a99349e19b6e92be10d13498, entries=150, sequenceid=160, filesize=11.9 K 2024-11-20T15:22:51,713 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 3c7be1c0ab4594b9826cba013e95ae89 in 1688ms, sequenceid=160, compaction requested=false 2024-11-20T15:22:51,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:51,774 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:51,774 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T15:22:51,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:22:51,775 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T15:22:51,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:22:51,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:51,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:22:51,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:51,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:22:51,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:51,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120003b95873d0442fb9a1be6125dd97cf5_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116170046/Put/seqid=0 2024-11-20T15:22:51,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742260_1436 (size=12304) 2024-11-20T15:22:51,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:51,792 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120003b95873d0442fb9a1be6125dd97cf5_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120003b95873d0442fb9a1be6125dd97cf5_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:51,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/d2c125513c2243ba9657966db967c6c9, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:51,793 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/d2c125513c2243ba9657966db967c6c9 is 175, key is test_row_0/A:col10/1732116170046/Put/seqid=0 2024-11-20T15:22:51,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742261_1437 (size=31105) 2024-11-20T15:22:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T15:22:52,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:52,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:52,198 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=178, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/d2c125513c2243ba9657966db967c6c9 2024-11-20T15:22:52,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/2341df51bb5b4431bdde11feb0d3a00e is 50, key is test_row_0/B:col10/1732116170046/Put/seqid=0 2024-11-20T15:22:52,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116232203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116232204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116232204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116232206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116232206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742262_1438 (size=12151) 2024-11-20T15:22:52,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116232311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116232311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116232312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116232312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116232312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116232516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116232516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116232517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116232517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116232517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,613 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/2341df51bb5b4431bdde11feb0d3a00e 2024-11-20T15:22:52,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/c089c4edf4ed453ca0e7f5bce963a717 is 50, key is test_row_0/C:col10/1732116170046/Put/seqid=0 2024-11-20T15:22:52,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742263_1439 (size=12151) 2024-11-20T15:22:52,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116232820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116232822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116232822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116232823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:52,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:52,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116232823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,025 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/c089c4edf4ed453ca0e7f5bce963a717 2024-11-20T15:22:53,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/d2c125513c2243ba9657966db967c6c9 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/d2c125513c2243ba9657966db967c6c9 2024-11-20T15:22:53,032 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/d2c125513c2243ba9657966db967c6c9, entries=150, sequenceid=178, filesize=30.4 K 2024-11-20T15:22:53,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-20T15:22:53,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/2341df51bb5b4431bdde11feb0d3a00e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2341df51bb5b4431bdde11feb0d3a00e 2024-11-20T15:22:53,037 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2341df51bb5b4431bdde11feb0d3a00e, entries=150, sequenceid=178, filesize=11.9 K 2024-11-20T15:22:53,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/c089c4edf4ed453ca0e7f5bce963a717 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/c089c4edf4ed453ca0e7f5bce963a717 2024-11-20T15:22:53,041 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/c089c4edf4ed453ca0e7f5bce963a717, entries=150, sequenceid=178, filesize=11.9 K 2024-11-20T15:22:53,042 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 3c7be1c0ab4594b9826cba013e95ae89 in 1267ms, sequenceid=178, compaction requested=true 2024-11-20T15:22:53,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:53,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:22:53,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-20T15:22:53,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-20T15:22:53,044 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-20T15:22:53,044 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0340 sec 2024-11-20T15:22:53,045 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 2.0370 sec 2024-11-20T15:22:53,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T15:22:53,112 INFO [Thread-1753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-20T15:22:53,114 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:53,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-20T15:22:53,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T15:22:53,115 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:53,115 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:53,115 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:53,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T15:22:53,267 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,267 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T15:22:53,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:22:53,268 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T15:22:53,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:22:53,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:53,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:22:53,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:53,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:22:53,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:53,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112066f365dfafc6481d88f76a7675dfda72_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116172204/Put/seqid=0 2024-11-20T15:22:53,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742264_1440 (size=12304) 2024-11-20T15:22:53,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:53,281 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112066f365dfafc6481d88f76a7675dfda72_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112066f365dfafc6481d88f76a7675dfda72_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:53,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/9fe93e31ad994eeaa1b99ffff5041836, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:53,282 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/9fe93e31ad994eeaa1b99ffff5041836 is 175, key is test_row_0/A:col10/1732116172204/Put/seqid=0 2024-11-20T15:22:53,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742265_1441 (size=31105) 2024-11-20T15:22:53,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:53,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:53,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116233350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116233353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116233356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116233358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116233357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T15:22:53,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116233459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116233464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116233464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116233468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116233469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116233660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116233668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116233670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116233670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116233674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,694 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=200, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/9fe93e31ad994eeaa1b99ffff5041836 2024-11-20T15:22:53,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/c1391986f6d247bca17deaf1716b6190 is 50, key is test_row_0/B:col10/1732116172204/Put/seqid=0 2024-11-20T15:22:53,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742266_1442 (size=12151) 2024-11-20T15:22:53,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T15:22:53,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116233966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116233974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116233975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116233975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:53,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:53,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116233983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:54,106 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/c1391986f6d247bca17deaf1716b6190 2024-11-20T15:22:54,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/112aab5035c24982abbc26153d6e2d7b is 50, key is test_row_0/C:col10/1732116172204/Put/seqid=0 2024-11-20T15:22:54,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742267_1443 (size=12151) 2024-11-20T15:22:54,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T15:22:54,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:54,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116234471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:54,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:54,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116234482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:54,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:54,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116234483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:54,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:54,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116234485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:54,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:54,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116234488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:54,573 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/112aab5035c24982abbc26153d6e2d7b 2024-11-20T15:22:54,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/9fe93e31ad994eeaa1b99ffff5041836 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/9fe93e31ad994eeaa1b99ffff5041836 2024-11-20T15:22:54,582 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/9fe93e31ad994eeaa1b99ffff5041836, entries=150, sequenceid=200, filesize=30.4 K 2024-11-20T15:22:54,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/c1391986f6d247bca17deaf1716b6190 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c1391986f6d247bca17deaf1716b6190 2024-11-20T15:22:54,587 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c1391986f6d247bca17deaf1716b6190, entries=150, sequenceid=200, filesize=11.9 K 2024-11-20T15:22:54,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/112aab5035c24982abbc26153d6e2d7b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/112aab5035c24982abbc26153d6e2d7b 2024-11-20T15:22:54,592 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/112aab5035c24982abbc26153d6e2d7b, entries=150, sequenceid=200, filesize=11.9 K 2024-11-20T15:22:54,593 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 3c7be1c0ab4594b9826cba013e95ae89 in 1325ms, sequenceid=200, compaction requested=true 2024-11-20T15:22:54,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:54,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:54,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-20T15:22:54,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-20T15:22:54,596 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-20T15:22:54,596 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4790 sec 2024-11-20T15:22:54,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 1.4820 sec 2024-11-20T15:22:55,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T15:22:55,220 INFO [Thread-1753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-20T15:22:55,222 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:55,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-11-20T15:22:55,225 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
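The repeated RegionTooBusyException warnings in this stretch ("Over memstore limit=512.0 K") come from HRegion.checkResources blocking updates once the region's memstore exceeds its blocking size, which is derived from the flush size times the block multiplier; the 512 K figure here presumably reflects a deliberately small flush size in the test configuration. A minimal sketch of reading those settings, assuming the standard configuration keys and their stock defaults; the class name is illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Blocking limit ~= memstore flush size * block multiplier.
    // Stock defaults are 128 MB and 4; the test run above appears to use a
    // much smaller flush size, hence the 512 K limit in the warnings.
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("approx. blocking limit = " + (flushSize * multiplier) + " bytes");
  }
}
```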
2024-11-20T15:22:55,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T15:22:55,226 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:55,226 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:55,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T15:22:55,378 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:55,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T15:22:55,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:55,379 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T15:22:55,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:22:55,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:55,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:22:55,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:55,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:22:55,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:55,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e643df1f1d89494eb929fc76a65e51b0_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116173355/Put/seqid=0 2024-11-20T15:22:55,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:35761 is added to blk_1073742268_1444 (size=12304) 2024-11-20T15:22:55,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:55,466 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e643df1f1d89494eb929fc76a65e51b0_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e643df1f1d89494eb929fc76a65e51b0_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:55,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/4ce0b484c2274d57a233a0efe707552b, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:55,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/4ce0b484c2274d57a233a0efe707552b is 175, key is test_row_0/A:col10/1732116173355/Put/seqid=0 2024-11-20T15:22:55,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:55,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:55,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742269_1445 (size=31105) 2024-11-20T15:22:55,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T15:22:55,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:55,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116235550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:55,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:55,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116235551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:55,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:55,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116235559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:55,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:55,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116235565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:55,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:55,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116235566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:55,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:55,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116235669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:55,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:55,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116235670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:55,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:55,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116235671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:55,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:55,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116235680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:55,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:55,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116235680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:55,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T15:22:55,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:55,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116235881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:55,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:55,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116235890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:55,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:55,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116235890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:55,906 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=214, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/4ce0b484c2274d57a233a0efe707552b 2024-11-20T15:22:55,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:55,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:55,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116235895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:55,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116235894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:55,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/b64d95fc54834ef98e594f0c00e923f7 is 50, key is test_row_0/B:col10/1732116173355/Put/seqid=0 2024-11-20T15:22:55,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742270_1446 (size=12151) 2024-11-20T15:22:56,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:56,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116236195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:56,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:56,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116236201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:56,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:56,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116236201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:56,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:56,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116236208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:56,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:56,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116236210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:56,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T15:22:56,369 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/b64d95fc54834ef98e594f0c00e923f7 2024-11-20T15:22:56,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/01a480b701ec4d719aff248b2117f507 is 50, key is test_row_0/C:col10/1732116173355/Put/seqid=0 2024-11-20T15:22:56,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742271_1447 (size=12151) 2024-11-20T15:22:56,420 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/01a480b701ec4d719aff248b2117f507 2024-11-20T15:22:56,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/4ce0b484c2274d57a233a0efe707552b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/4ce0b484c2274d57a233a0efe707552b 2024-11-20T15:22:56,434 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/4ce0b484c2274d57a233a0efe707552b, entries=150, sequenceid=214, filesize=30.4 K 2024-11-20T15:22:56,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/b64d95fc54834ef98e594f0c00e923f7 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/b64d95fc54834ef98e594f0c00e923f7 2024-11-20T15:22:56,449 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/b64d95fc54834ef98e594f0c00e923f7, entries=150, sequenceid=214, filesize=11.9 K 2024-11-20T15:22:56,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/01a480b701ec4d719aff248b2117f507 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/01a480b701ec4d719aff248b2117f507 2024-11-20T15:22:56,463 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/01a480b701ec4d719aff248b2117f507, entries=150, sequenceid=214, filesize=11.9 K 2024-11-20T15:22:56,463 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 3c7be1c0ab4594b9826cba013e95ae89 in 1084ms, sequenceid=214, compaction requested=true 2024-11-20T15:22:56,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:56,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
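The recurring "RegionTooBusyException: Over memstore limit=512.0 K" warnings in this stretch come from HRegion.checkResources(), which rejects writes while the region's memstore is above its blocking threshold (the per-region flush size multiplied by hbase.hregion.memstore.block.multiplier); once the flushes above drain the memstore, mutations are accepted again. The sketch below only illustrates how a 512 K blocking limit can arise from those two settings; the concrete values are assumptions, not configuration read from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical values chosen to yield the 512 K limit seen in the warnings;
    // stock defaults are 128 MB for the flush size and 4 for the multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier; // 512 K with the values above
    System.out.println("Puts fail with RegionTooBusyException once the region memstore exceeds "
        + blockingLimit + " bytes, until a flush brings it back under the limit.");
  }
}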
2024-11-20T15:22:56,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-20T15:22:56,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-11-20T15:22:56,466 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-20T15:22:56,466 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2390 sec 2024-11-20T15:22:56,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 1.2450 sec 2024-11-20T15:22:56,718 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T15:22:56,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:22:56,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:56,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:22:56,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:56,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:22:56,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:56,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:56,732 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a025f0e87d494fb38d500f2b05f5efad_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116175553/Put/seqid=0 2024-11-20T15:22:56,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742272_1448 (size=14794) 2024-11-20T15:22:56,752 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,757 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a025f0e87d494fb38d500f2b05f5efad_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a025f0e87d494fb38d500f2b05f5efad_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:56,758 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/466e38e368ab403e9d477a4345e96626, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:56,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/466e38e368ab403e9d477a4345e96626 is 175, key is test_row_0/A:col10/1732116175553/Put/seqid=0 2024-11-20T15:22:56,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742273_1449 (size=39749) 2024-11-20T15:22:56,768 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=237, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/466e38e368ab403e9d477a4345e96626 2024-11-20T15:22:56,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:56,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116236755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:56,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:56,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116236759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:56,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:56,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116236761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:56,778 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:56,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116236762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:56,778 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/38a648fe9d9044b180ef1f9144ddde65 is 50, key is test_row_0/B:col10/1732116175553/Put/seqid=0 2024-11-20T15:22:56,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:56,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116236764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:56,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742274_1450 (size=12151) 2024-11-20T15:22:56,813 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/38a648fe9d9044b180ef1f9144ddde65 2024-11-20T15:22:56,838 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/3bc0c44987204c27b65fd38d9007cb83 is 50, key is test_row_0/C:col10/1732116175553/Put/seqid=0 2024-11-20T15:22:56,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742275_1451 (size=12151) 2024-11-20T15:22:56,881 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/3bc0c44987204c27b65fd38d9007cb83 2024-11-20T15:22:56,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:56,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116236871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:56,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/466e38e368ab403e9d477a4345e96626 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/466e38e368ab403e9d477a4345e96626 2024-11-20T15:22:56,890 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/466e38e368ab403e9d477a4345e96626, entries=200, sequenceid=237, filesize=38.8 K 2024-11-20T15:22:56,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/38a648fe9d9044b180ef1f9144ddde65 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/38a648fe9d9044b180ef1f9144ddde65 2024-11-20T15:22:56,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:56,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116236880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:56,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:56,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116236881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:56,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:56,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116236881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:56,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,895 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/38a648fe9d9044b180ef1f9144ddde65, entries=150, sequenceid=237, filesize=11.9 K 2024-11-20T15:22:56,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/3bc0c44987204c27b65fd38d9007cb83 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/3bc0c44987204c27b65fd38d9007cb83 2024-11-20T15:22:56,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:56,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116236886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:56,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,903 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/3bc0c44987204c27b65fd38d9007cb83, entries=150, sequenceid=237, filesize=11.9 K 2024-11-20T15:22:56,904 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 3c7be1c0ab4594b9826cba013e95ae89 in 186ms, sequenceid=237, compaction requested=true 2024-11-20T15:22:56,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:56,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:56,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:56,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:56,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:56,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:56,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T15:22:56,904 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-20T15:22:56,904 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking
2024-11-20T15:22:56,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T15:22:56,913 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 73214 starting at candidate #0 after considering 10 permutations with 10 in ratio
2024-11-20T15:22:56,913 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 204226 starting at candidate #0 after considering 10 permutations with 10 in ratio
2024-11-20T15:22:56,913 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/A is initiating minor compaction (all files)
2024-11-20T15:22:56,913 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/B is initiating minor compaction (all files)
2024-11-20T15:22:56,913 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/B in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.
2024-11-20T15:22:56,914 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/22892832d1884071a668b9bd1520220a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/5b82cdc84caf4f75b21d57da2e3e4e82, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2341df51bb5b4431bdde11feb0d3a00e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c1391986f6d247bca17deaf1716b6190, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/b64d95fc54834ef98e594f0c00e923f7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/38a648fe9d9044b180ef1f9144ddde65] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=71.5 K
2024-11-20T15:22:56,914 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/A in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.
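The records above show the write-amplification cycle this test exercises: the flush at sequenceid=237 leaves each of the A, B and C stores with six files, so CompactSplit queues compactions that ExploringCompactionPolicy resolves to "all files" selections. A minimal sketch of driving the same cycle by hand through the public Admin API follows; the table name comes from the log, while the class name, the use of a major compaction, and the assumption of a reachable cluster configuration on the classpath are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushThenCompactSketch {
  public static void main(String[] args) throws Exception {
    // Cluster connection settings are taken from hbase-site.xml on the classpath.
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Counterpart of the FlushTableProcedure (pid=120) that finished above.
      admin.flush(table);
      // Explicitly request a rewrite of all store files, similar to the
      // "all files" minor compactions the CompactSplit threads just selected.
      admin.majorCompact(table);
    }
  }
}

Issuing the compaction through Admin only requests it; the region server's compaction threads, visible in the log above, still decide when the selected files are actually rewritten.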
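The recurring RegionTooBusyException warnings are the region server pushing back on writers while the region's memstore is above its blocking threshold (in HBase this is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the limit reported in this run is 512.0 K). The stock client treats the exception as retryable and re-submits after a pause rather than failing the mutation outright. A rough sketch of such a writer follows, assuming illustrative retry settings: the property names are standard client knobs, the values and the cell value are invented, and the row/family/qualifier mirror the test_row_0/A:col10 cells seen in the flush records.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Bound how long a writer keeps retrying when the region answers RegionTooBusyException.
    // Illustrative values; the actual test run uses whatever its mini-cluster config provides.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100L);                 // ms between retries
    conf.setLong("hbase.client.operation.timeout", 60_000L);  // overall cap per operation

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same shape of cell the log shows being flushed: row test_row_0, family A, qualifier col10.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));
      table.put(put); // retried internally while the memstore stays over its blocking limit
    }
  }
}

If retries are exhausted while the region stays blocked, the client gives up and surfaces the failure to the caller.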
2024-11-20T15:22:56,914 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/762d89b668624e26ad52a465cd474fc3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/4094299df5154c2e8318006ac4d05c3c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/d2c125513c2243ba9657966db967c6c9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/9fe93e31ad994eeaa1b99ffff5041836, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/4ce0b484c2274d57a233a0efe707552b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/466e38e368ab403e9d477a4345e96626] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=199.4 K 2024-11-20T15:22:56,914 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=10 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:56,914 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/762d89b668624e26ad52a465cd474fc3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/4094299df5154c2e8318006ac4d05c3c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/d2c125513c2243ba9657966db967c6c9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/9fe93e31ad994eeaa1b99ffff5041836, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/4ce0b484c2274d57a233a0efe707552b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/466e38e368ab403e9d477a4345e96626] 2024-11-20T15:22:56,915 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22892832d1884071a668b9bd1520220a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732116168247 2024-11-20T15:22:56,915 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 762d89b668624e26ad52a465cd474fc3, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732116168247 2024-11-20T15:22:56,915 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b82cdc84caf4f75b21d57da2e3e4e82, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732116168902 2024-11-20T15:22:56,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,915 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 4094299df5154c2e8318006ac4d05c3c, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732116168892 2024-11-20T15:22:56,915 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2341df51bb5b4431bdde11feb0d3a00e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732116170046 2024-11-20T15:22:56,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,916 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting d2c125513c2243ba9657966db967c6c9, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732116170046 2024-11-20T15:22:56,916 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1391986f6d247bca17deaf1716b6190, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732116172202 2024-11-20T15:22:56,916 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 9fe93e31ad994eeaa1b99ffff5041836, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732116172202 2024-11-20T15:22:56,917 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting b64d95fc54834ef98e594f0c00e923f7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732116173351 2024-11-20T15:22:56,917 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ce0b484c2274d57a233a0efe707552b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732116173351 2024-11-20T15:22:56,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,917 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 466e38e368ab403e9d477a4345e96626, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732116175553 2024-11-20T15:22:56,917 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38a648fe9d9044b180ef1f9144ddde65, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732116175553 2024-11-20T15:22:56,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,925 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,934 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,939 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,941 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:56,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,956 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#B#compaction#388 average throughput is 0.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:56,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,961 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120815cef254d3f4d55a031d391dbcd222d_3c7be1c0ab4594b9826cba013e95ae89 store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:56,964 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/2c55c4ae9df74483bf715b7840282919 is 50, key is test_row_0/B:col10/1732116175553/Put/seqid=0 2024-11-20T15:22:56,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,974 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120815cef254d3f4d55a031d391dbcd222d_3c7be1c0ab4594b9826cba013e95ae89, store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:56,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,974 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120815cef254d3f4d55a031d391dbcd222d_3c7be1c0ab4594b9826cba013e95ae89 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:56,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:56,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] 
2024-11-20T15:22:57,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742276_1452 (size=12663)
2024-11-20T15:22:57,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742277_1453 (size=4469)
2024-11-20T15:22:57,036 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#A#compaction#387 average throughput is 0.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T15:22:57,037 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/fdf0f39cb56044fcafb86c14fa7273e6 is 175, key is test_row_0/A:col10/1732116175553/Put/seqid=0
2024-11-20T15:22:57,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742278_1454 (size=31617)
2024-11-20T15:22:57,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB
2024-11-20T15:22:57,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A
2024-11-20T15:22:57,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:22:57,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B
2024-11-20T15:22:57,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:22:57,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C
2024-11-20T15:22:57,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:22:57,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209a424e2e97fb4449ad5941ae9d55f77b_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_1/A:col10/1732116177136/Put/seqid=0 2024-11-20T15:22:57,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742279_1455 (size=12304) 2024-11-20T15:22:57,242 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:57,247 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209a424e2e97fb4449ad5941ae9d55f77b_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209a424e2e97fb4449ad5941ae9d55f77b_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:57,248 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/e600177b17c8423e90fde2eb56b343ac, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:57,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/e600177b17c8423e90fde2eb56b343ac is 175, key is test_row_1/A:col10/1732116177136/Put/seqid=0 2024-11-20T15:22:57,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116237259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116237260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116237273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116237273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116237275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742280_1456 (size=31101) 2024-11-20T15:22:57,302 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=251, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/e600177b17c8423e90fde2eb56b343ac 2024-11-20T15:22:57,313 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/f7d5542750af4512ab70c1b271aba1fd is 50, key is test_row_1/B:col10/1732116177136/Put/seqid=0 2024-11-20T15:22:57,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T15:22:57,331 INFO [Thread-1753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-20T15:22:57,333 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:57,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-11-20T15:22:57,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 
2024-11-20T15:22:57,337 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:57,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742281_1457 (size=9757) 2024-11-20T15:22:57,340 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:57,340 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:57,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/f7d5542750af4512ab70c1b271aba1fd 2024-11-20T15:22:57,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/2c3baf3b04d342edb2abc79303691b47 is 50, key is test_row_1/C:col10/1732116177136/Put/seqid=0 2024-11-20T15:22:57,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116237386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116237388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116237392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116237392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116237392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,427 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/2c55c4ae9df74483bf715b7840282919 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2c55c4ae9df74483bf715b7840282919 2024-11-20T15:22:57,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742282_1458 (size=9757) 2024-11-20T15:22:57,432 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/B of 3c7be1c0ab4594b9826cba013e95ae89 into 2c55c4ae9df74483bf715b7840282919(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:57,432 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:57,432 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/B, priority=10, startTime=1732116176904; duration=0sec 2024-11-20T15:22:57,432 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:57,432 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:B 2024-11-20T15:22:57,432 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-20T15:22:57,434 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 73214 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-20T15:22:57,434 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/C is initiating minor compaction (all files) 2024-11-20T15:22:57,434 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/C in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:57,434 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/1689f150a3324020af01337c8405d480, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/53921b72a99349e19b6e92be10d13498, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/c089c4edf4ed453ca0e7f5bce963a717, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/112aab5035c24982abbc26153d6e2d7b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/01a480b701ec4d719aff248b2117f507, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/3bc0c44987204c27b65fd38d9007cb83] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=71.5 K 2024-11-20T15:22:57,435 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1689f150a3324020af01337c8405d480, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732116168247 2024-11-20T15:22:57,435 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 53921b72a99349e19b6e92be10d13498, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732116168902 2024-11-20T15:22:57,435 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting c089c4edf4ed453ca0e7f5bce963a717, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732116170046 2024-11-20T15:22:57,436 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 112aab5035c24982abbc26153d6e2d7b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732116172202 2024-11-20T15:22:57,436 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01a480b701ec4d719aff248b2117f507, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732116173351 2024-11-20T15:22:57,436 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3bc0c44987204c27b65fd38d9007cb83, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732116175553 2024-11-20T15:22:57,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T15:22:57,449 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#C#compaction#392 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:57,449 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/50de2c16dc164f95964f74a3efdb3a6f is 50, key is test_row_0/C:col10/1732116175553/Put/seqid=0 2024-11-20T15:22:57,469 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/fdf0f39cb56044fcafb86c14fa7273e6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/fdf0f39cb56044fcafb86c14fa7273e6 2024-11-20T15:22:57,474 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/A of 3c7be1c0ab4594b9826cba013e95ae89 into fdf0f39cb56044fcafb86c14fa7273e6(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:57,474 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:57,474 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/A, priority=10, startTime=1732116176904; duration=0sec 2024-11-20T15:22:57,474 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:57,474 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:A 2024-11-20T15:22:57,492 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,493 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T15:22:57,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:57,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:57,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:57,493 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:57,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:57,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:57,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742283_1459 (size=12663) 2024-11-20T15:22:57,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116237597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116237605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116237606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116237606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116237607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T15:22:57,645 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,646 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T15:22:57,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:57,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:57,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:57,646 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
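The RegionTooBusyException entries above come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking limit (512.0 K in this run). In a stock deployment that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the unusually small value here presumably reflects a deliberate override in the TestAcidGuarantees setup to force frequent flushes. A minimal sketch of how such a test configuration might be expressed (the concrete values are assumptions, not read from this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
    public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Flush each memstore once it reaches ~128 KB (hypothetical test value).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // Block new writes once the memstore grows past 4x the flush size,
        // i.e. 512 KB -- matching the "Over memstore limit=512.0 K" seen above.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}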
2024-11-20T15:22:57,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:57,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:57,799 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T15:22:57,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:57,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:57,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:57,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:57,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:57,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
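The repeated pid=123 failures above follow a single pattern: the master dispatches FlushRegionCallable to the region server, the server finds the region "already flushing", the callable throws "Unable to complete flush", and the master records the RemoteProcedureException and re-dispatches until the in-progress flush finishes. From the client side this exchange is typically driven by a single administrative flush request; a minimal sketch of issuing one (table name taken from the log, everything else assumed):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Asks the master to run a flush procedure (the pid=122/123 entries in this
            // log are the server side of such a request); the call normally returns
            // once the procedure completes.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}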
2024-11-20T15:22:57,830 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/2c3baf3b04d342edb2abc79303691b47 2024-11-20T15:22:57,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/e600177b17c8423e90fde2eb56b343ac as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/e600177b17c8423e90fde2eb56b343ac 2024-11-20T15:22:57,840 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/e600177b17c8423e90fde2eb56b343ac, entries=150, sequenceid=251, filesize=30.4 K 2024-11-20T15:22:57,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/f7d5542750af4512ab70c1b271aba1fd as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/f7d5542750af4512ab70c1b271aba1fd 2024-11-20T15:22:57,846 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/f7d5542750af4512ab70c1b271aba1fd, entries=100, sequenceid=251, filesize=9.5 K 2024-11-20T15:22:57,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/2c3baf3b04d342edb2abc79303691b47 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/2c3baf3b04d342edb2abc79303691b47 2024-11-20T15:22:57,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/2c3baf3b04d342edb2abc79303691b47, entries=100, sequenceid=251, filesize=9.5 K 2024-11-20T15:22:57,852 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 3c7be1c0ab4594b9826cba013e95ae89 in 715ms, sequenceid=251, compaction requested=false 2024-11-20T15:22:57,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:57,902 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/50de2c16dc164f95964f74a3efdb3a6f as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/50de2c16dc164f95964f74a3efdb3a6f 2024-11-20T15:22:57,907 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/C of 3c7be1c0ab4594b9826cba013e95ae89 into 50de2c16dc164f95964f74a3efdb3a6f(size=12.4 K), total size for store is 21.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:22:57,907 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:57,907 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/C, priority=10, startTime=1732116176904; duration=0sec 2024-11-20T15:22:57,907 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:57,907 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:C 2024-11-20T15:22:57,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:57,916 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T15:22:57,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:22:57,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:57,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:22:57,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:57,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:22:57,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:57,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120175935bcd8f04f55ba2ed6dfd8bd1e3f_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116177271/Put/seqid=0 2024-11-20T15:22:57,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T15:22:57,952 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,953 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T15:22:57,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:57,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:57,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:57,954 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:57,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
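The "CompactingMemStore ... FLUSHING TO DISK" and "Swapping pipeline suffix" lines above indicate that the A, B and C families use the in-memory compaction ("compacting") memstore, which keeps a pipeline of immutable segments and swaps it out when a flush starts. In HBase 2.x that behaviour is chosen per column family; a minimal sketch of a descriptor that enables it (family name and policy are illustrative, not read from this log):

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactingMemstoreFamily {
    public static ColumnFamilyDescriptor build() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                // BASIC keeps an in-memory pipeline of segments; EAGER additionally
                // eliminates duplicate cells before flushing to disk.
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .build();
    }
}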
2024-11-20T15:22:57,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:57,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116237957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116237959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116237959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116237963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:57,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:57,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116237964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742284_1460 (size=12454) 2024-11-20T15:22:58,005 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,008 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120175935bcd8f04f55ba2ed6dfd8bd1e3f_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120175935bcd8f04f55ba2ed6dfd8bd1e3f_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:58,009 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/853acd2be98c46d6838b0f6284ce9bcd, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:58,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/853acd2be98c46d6838b0f6284ce9bcd is 175, key is test_row_0/A:col10/1732116177271/Put/seqid=0 2024-11-20T15:22:58,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 
is added to blk_1073742285_1461 (size=31255) 2024-11-20T15:22:58,044 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=277, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/853acd2be98c46d6838b0f6284ce9bcd 2024-11-20T15:22:58,074 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/c6a3530125d945fca08ddbcf1e7ef52d is 50, key is test_row_0/B:col10/1732116177271/Put/seqid=0 2024-11-20T15:22:58,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:58,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116238069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:58,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116238070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:58,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116238070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:58,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116238072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:58,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116238075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742286_1462 (size=12301) 2024-11-20T15:22:58,107 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T15:22:58,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:58,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:58,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:58,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
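Each CallRunner entry above is a client Mutate call from 172.17.0.2 being rejected with RegionTooBusyException while the flush catches up. The standard HBase client already retries this exception internally with backoff, so callers usually just see higher latency; a bare-bones sketch of an explicit retry around a put, under the assumption that the exception surfaces directly (row, family and backoff values are illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetry {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Depending on client retry settings this exception may instead
                    // arrive wrapped in a RetriesExhaustedException.
                    if (attempt >= 5) throw e;    // give up after a few tries
                    Thread.sleep(100L * attempt); // simple linear backoff
                }
            }
        }
    }
}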
2024-11-20T15:22:58,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:58,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:58,262 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T15:22:58,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:58,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:58,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:58,263 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:58,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:58,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:58,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:58,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116238281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:58,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116238281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:58,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116238281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:58,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116238281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:58,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116238286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,415 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,416 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T15:22:58,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:58,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:58,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:58,416 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:58,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:58,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:58,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T15:22:58,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/c6a3530125d945fca08ddbcf1e7ef52d 2024-11-20T15:22:58,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/d4d498f2d42942bbb6455e22b2678f31 is 50, key is test_row_0/C:col10/1732116177271/Put/seqid=0 2024-11-20T15:22:58,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742287_1463 (size=12301) 2024-11-20T15:22:58,570 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/d4d498f2d42942bbb6455e22b2678f31 2024-11-20T15:22:58,571 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T15:22:58,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): 
Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:58,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:58,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:58,572 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:58,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:58,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:58,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/853acd2be98c46d6838b0f6284ce9bcd as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/853acd2be98c46d6838b0f6284ce9bcd 2024-11-20T15:22:58,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/853acd2be98c46d6838b0f6284ce9bcd, entries=150, sequenceid=277, filesize=30.5 K 2024-11-20T15:22:58,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/c6a3530125d945fca08ddbcf1e7ef52d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c6a3530125d945fca08ddbcf1e7ef52d 2024-11-20T15:22:58,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c6a3530125d945fca08ddbcf1e7ef52d, entries=150, sequenceid=277, filesize=12.0 K 2024-11-20T15:22:58,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/d4d498f2d42942bbb6455e22b2678f31 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/d4d498f2d42942bbb6455e22b2678f31 2024-11-20T15:22:58,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:58,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116238591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:58,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116238591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:58,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116238592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:58,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116238593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:58,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116238594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,605 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/d4d498f2d42942bbb6455e22b2678f31, entries=150, sequenceid=277, filesize=12.0 K 2024-11-20T15:22:58,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 3c7be1c0ab4594b9826cba013e95ae89 in 692ms, sequenceid=277, compaction requested=true 2024-11-20T15:22:58,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:58,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:58,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:58,608 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:58,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:58,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:58,608 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:58,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:58,608 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:58,609 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93973 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:58,609 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/A is initiating minor compaction (all files) 2024-11-20T15:22:58,609 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/A in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:58,609 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/fdf0f39cb56044fcafb86c14fa7273e6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/e600177b17c8423e90fde2eb56b343ac, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/853acd2be98c46d6838b0f6284ce9bcd] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=91.8 K 2024-11-20T15:22:58,609 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:58,609 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/fdf0f39cb56044fcafb86c14fa7273e6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/e600177b17c8423e90fde2eb56b343ac, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/853acd2be98c46d6838b0f6284ce9bcd] 2024-11-20T15:22:58,610 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:58,610 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/B is initiating minor compaction (all files) 2024-11-20T15:22:58,610 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/B in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:58,610 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2c55c4ae9df74483bf715b7840282919, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/f7d5542750af4512ab70c1b271aba1fd, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c6a3530125d945fca08ddbcf1e7ef52d] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=33.9 K 2024-11-20T15:22:58,610 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting fdf0f39cb56044fcafb86c14fa7273e6, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732116175553 2024-11-20T15:22:58,610 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c55c4ae9df74483bf715b7840282919, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732116175553 2024-11-20T15:22:58,610 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting e600177b17c8423e90fde2eb56b343ac, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732116176760 2024-11-20T15:22:58,611 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting f7d5542750af4512ab70c1b271aba1fd, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732116176763 2024-11-20T15:22:58,611 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 853acd2be98c46d6838b0f6284ce9bcd, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732116177271 2024-11-20T15:22:58,613 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 
c6a3530125d945fca08ddbcf1e7ef52d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732116177271 2024-11-20T15:22:58,635 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:58,646 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#B#compaction#397 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:58,646 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/240fb8ffe404407197a1ce9ff0bab67f is 50, key is test_row_0/B:col10/1732116177271/Put/seqid=0 2024-11-20T15:22:58,650 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120c926f46f22f54b7a8e297871b00855a3_3c7be1c0ab4594b9826cba013e95ae89 store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:58,652 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120c926f46f22f54b7a8e297871b00855a3_3c7be1c0ab4594b9826cba013e95ae89, store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:58,652 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c926f46f22f54b7a8e297871b00855a3_3c7be1c0ab4594b9826cba013e95ae89 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:58,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742288_1464 (size=12915) 2024-11-20T15:22:58,687 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/240fb8ffe404407197a1ce9ff0bab67f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/240fb8ffe404407197a1ce9ff0bab67f 2024-11-20T15:22:58,697 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/B of 3c7be1c0ab4594b9826cba013e95ae89 into 240fb8ffe404407197a1ce9ff0bab67f(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:58,697 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:58,697 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/B, priority=13, startTime=1732116178608; duration=0sec 2024-11-20T15:22:58,697 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:58,697 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:B 2024-11-20T15:22:58,697 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:58,699 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:58,699 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/C is initiating minor compaction (all files) 2024-11-20T15:22:58,699 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/C in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:58,699 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/50de2c16dc164f95964f74a3efdb3a6f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/2c3baf3b04d342edb2abc79303691b47, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/d4d498f2d42942bbb6455e22b2678f31] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=33.9 K 2024-11-20T15:22:58,699 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 50de2c16dc164f95964f74a3efdb3a6f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732116175553 2024-11-20T15:22:58,700 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c3baf3b04d342edb2abc79303691b47, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732116176763 2024-11-20T15:22:58,700 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting d4d498f2d42942bbb6455e22b2678f31, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732116177271 2024-11-20T15:22:58,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is 
added to blk_1073742289_1465 (size=4469) 2024-11-20T15:22:58,709 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#C#compaction#398 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:58,709 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/b8950f349d55434293f1666a0c049efa is 50, key is test_row_0/C:col10/1732116177271/Put/seqid=0 2024-11-20T15:22:58,710 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#A#compaction#396 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:58,711 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/8eef99f40044434a90d90e15284e8004 is 175, key is test_row_0/A:col10/1732116177271/Put/seqid=0 2024-11-20T15:22:58,724 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:58,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T15:22:58,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:22:58,727 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T15:22:58,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:22:58,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:58,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:22:58,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:58,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:22:58,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:58,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742290_1466 (size=12915) 2024-11-20T15:22:58,745 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/b8950f349d55434293f1666a0c049efa as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b8950f349d55434293f1666a0c049efa 2024-11-20T15:22:58,750 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/C of 3c7be1c0ab4594b9826cba013e95ae89 into b8950f349d55434293f1666a0c049efa(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:22:58,750 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:58,750 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/C, priority=13, startTime=1732116178608; duration=0sec 2024-11-20T15:22:58,750 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:58,750 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:C 2024-11-20T15:22:58,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742291_1467 (size=31869) 2024-11-20T15:22:58,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c0901f0fab6f43b89c5e7cc45feb7a12_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116177962/Put/seqid=0 2024-11-20T15:22:58,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742292_1468 (size=12454) 2024-11-20T15:22:58,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,817 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c0901f0fab6f43b89c5e7cc45feb7a12_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c0901f0fab6f43b89c5e7cc45feb7a12_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:58,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/f3e2783087464ea9ad64502f1fbaeee9, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:58,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/f3e2783087464ea9ad64502f1fbaeee9 is 175, key is test_row_0/A:col10/1732116177962/Put/seqid=0 2024-11-20T15:22:58,839 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742293_1469 (size=31255) 2024-11-20T15:22:58,845 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=291, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/f3e2783087464ea9ad64502f1fbaeee9 2024-11-20T15:22:58,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/49dfbf141918458b96e5e1bc667b3aab is 50, key is test_row_0/B:col10/1732116177962/Put/seqid=0 2024-11-20T15:22:58,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742294_1470 (size=12301) 2024-11-20T15:22:58,873 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/49dfbf141918458b96e5e1bc667b3aab 2024-11-20T15:22:58,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/7699d197878341b1b40ca8c35f89c870 is 50, key is test_row_0/C:col10/1732116177962/Put/seqid=0 2024-11-20T15:22:58,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742295_1471 (size=12301) 2024-11-20T15:22:58,932 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/7699d197878341b1b40ca8c35f89c870 2024-11-20T15:22:58,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/f3e2783087464ea9ad64502f1fbaeee9 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/f3e2783087464ea9ad64502f1fbaeee9 2024-11-20T15:22:58,945 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/f3e2783087464ea9ad64502f1fbaeee9, entries=150, sequenceid=291, filesize=30.5 K 
2024-11-20T15:22:58,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/49dfbf141918458b96e5e1bc667b3aab as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/49dfbf141918458b96e5e1bc667b3aab 2024-11-20T15:22:58,951 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/49dfbf141918458b96e5e1bc667b3aab, entries=150, sequenceid=291, filesize=12.0 K 2024-11-20T15:22:58,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/7699d197878341b1b40ca8c35f89c870 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/7699d197878341b1b40ca8c35f89c870 2024-11-20T15:22:58,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,955 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,960 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/7699d197878341b1b40ca8c35f89c870, entries=150, sequenceid=291, filesize=12.0 K 2024-11-20T15:22:58,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,961 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for 3c7be1c0ab4594b9826cba013e95ae89 in 234ms, sequenceid=291, compaction requested=false 2024-11-20T15:22:58,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:58,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:58,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-11-20T15:22:58,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-11-20T15:22:58,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:22:58,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-20T15:22:58,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6230 sec 2024-11-20T15:22:58,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,967 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 1.6330 sec 2024-11-20T15:22:58,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:58,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T15:22:59,161 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/8eef99f40044434a90d90e15284e8004 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/8eef99f40044434a90d90e15284e8004
2024-11-20T15:22:59,167 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/A of 3c7be1c0ab4594b9826cba013e95ae89 into 8eef99f40044434a90d90e15284e8004(size=31.1 K), total size for store is 61.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T15:22:59,167 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89:
2024-11-20T15:22:59,167 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/A, priority=13, startTime=1732116178608; duration=0sec
2024-11-20T15:22:59,167 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T15:22:59,167 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:A
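Aside from the compaction entries above, this stretch of the log consists almost entirely of the same StoreFileTrackerFactory DEBUG entry repeated by the RPC handlers. If a condensed view of such an excerpt is useful, the following is a minimal sketch of a hypothetical stand-alone helper (not part of the test run or of HBase itself) that counts those repeated entries and echoes everything else unchanged; it assumes one log entry per input line.

# condense_sft_log.py -- hypothetical helper, assumptions noted above
import re
import sys
from collections import Counter

# Matches the repeated entry visible in this log:
# "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl <class>"
TRACKER_RE = re.compile(
    r"StoreFileTrackerFactory\(\d+\): instantiating StoreFileTracker impl (\S+)"
)

def condense(lines):
    counts = Counter()   # tracker impl class name -> number of repeated entries
    kept = []            # every entry that is not tracker-instantiation noise
    for line in lines:
        hits = TRACKER_RE.findall(line)
        if hits:
            counts.update(hits)
        elif line.strip():
            kept.append(line.rstrip("\n"))
    return counts, kept

if __name__ == "__main__":
    counts, kept = condense(sys.stdin)
    for impl, n in counts.most_common():
        print(f"{n} x instantiating StoreFileTracker impl {impl}")
    print("\n".join(kept))

Run as "python condense_sft_log.py < test.log" to get one count line per tracker implementation class followed by the remaining log entries in their original order.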
2024-11-20T15:22:59,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:59,309 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:22:59,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:22:59,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:59,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:22:59,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:59,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:22:59,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:59,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b2579c8f8f114b6ba0d0ebd802c2d243_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116179248/Put/seqid=0 2024-11-20T15:22:59,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742296_1472 (size=20074) 2024-11-20T15:22:59,338 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:22:59,343 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b2579c8f8f114b6ba0d0ebd802c2d243_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b2579c8f8f114b6ba0d0ebd802c2d243_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:22:59,344 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/e5144331b7664e27829a3874a35c79e0, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:59,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/e5144331b7664e27829a3874a35c79e0 is 175, key is test_row_0/A:col10/1732116179248/Put/seqid=0 2024-11-20T15:22:59,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742297_1473 (size=57333) 2024-11-20T15:22:59,408 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:59,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116239397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,409 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:59,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116239398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:59,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116239400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:59,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116239401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:59,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116239401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T15:22:59,440 INFO [Thread-1753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-20T15:22:59,442 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:22:59,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-11-20T15:22:59,444 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:22:59,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T15:22:59,444 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:22:59,445 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:22:59,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:59,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116239510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:59,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116239511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:59,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116239511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:59,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116239511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:59,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116239512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T15:22:59,598 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,598 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-20T15:22:59,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:59,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:59,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:59,599 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:59,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:59,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:59,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:59,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116239713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:59,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116239714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:59,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116239719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:59,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116239719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:22:59,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116239721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T15:22:59,751 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-20T15:22:59,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:59,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:22:59,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:59,753 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:22:59,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:59,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:22:59,764 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=305, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/e5144331b7664e27829a3874a35c79e0 2024-11-20T15:22:59,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/095fafe913504f599d79b6507bb05176 is 50, key is test_row_0/B:col10/1732116179248/Put/seqid=0 2024-11-20T15:22:59,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742298_1474 (size=12301) 2024-11-20T15:22:59,793 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/095fafe913504f599d79b6507bb05176 2024-11-20T15:22:59,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/92c4e240225f42a08cd8454a9dced1ae is 50, key is test_row_0/C:col10/1732116179248/Put/seqid=0 2024-11-20T15:22:59,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742299_1475 (size=12301) 2024-11-20T15:22:59,881 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/92c4e240225f42a08cd8454a9dced1ae 2024-11-20T15:22:59,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/e5144331b7664e27829a3874a35c79e0 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/e5144331b7664e27829a3874a35c79e0 2024-11-20T15:22:59,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/e5144331b7664e27829a3874a35c79e0, entries=300, sequenceid=305, filesize=56.0 K 2024-11-20T15:22:59,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/095fafe913504f599d79b6507bb05176 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/095fafe913504f599d79b6507bb05176 2024-11-20T15:22:59,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/095fafe913504f599d79b6507bb05176, entries=150, sequenceid=305, filesize=12.0 K 2024-11-20T15:22:59,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/92c4e240225f42a08cd8454a9dced1ae as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/92c4e240225f42a08cd8454a9dced1ae 2024-11-20T15:22:59,902 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/92c4e240225f42a08cd8454a9dced1ae, entries=150, sequenceid=305, filesize=12.0 K 2024-11-20T15:22:59,903 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 3c7be1c0ab4594b9826cba013e95ae89 in 594ms, sequenceid=305, compaction requested=true 2024-11-20T15:22:59,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:22:59,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:22:59,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:59,903 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:59,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:22:59,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:22:59,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:22:59,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:22:59,903 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:22:59,904 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 120457 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:59,904 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/A is initiating minor compaction (all files) 2024-11-20T15:22:59,904 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:22:59,904 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/A in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:59,904 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/B is initiating minor compaction (all files) 2024-11-20T15:22:59,904 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/B in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:59,904 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/8eef99f40044434a90d90e15284e8004, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/f3e2783087464ea9ad64502f1fbaeee9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/e5144331b7664e27829a3874a35c79e0] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=117.6 K 2024-11-20T15:22:59,904 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:22:59,904 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/240fb8ffe404407197a1ce9ff0bab67f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/49dfbf141918458b96e5e1bc667b3aab, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/095fafe913504f599d79b6507bb05176] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=36.6 K 2024-11-20T15:22:59,904 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/8eef99f40044434a90d90e15284e8004, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/f3e2783087464ea9ad64502f1fbaeee9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/e5144331b7664e27829a3874a35c79e0] 2024-11-20T15:22:59,905 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8eef99f40044434a90d90e15284e8004, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732116177271 2024-11-20T15:22:59,905 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 240fb8ffe404407197a1ce9ff0bab67f, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732116177271 2024-11-20T15:22:59,905 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3e2783087464ea9ad64502f1fbaeee9, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732116177950 2024-11-20T15:22:59,905 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 49dfbf141918458b96e5e1bc667b3aab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732116177950 2024-11-20T15:22:59,906 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5144331b7664e27829a3874a35c79e0, keycount=300, bloomtype=ROW, size=56.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732116179248 2024-11-20T15:22:59,906 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:22:59,906 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 095fafe913504f599d79b6507bb05176, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732116179248 2024-11-20T15:22:59,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-20T15:22:59,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:22:59,907 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T15:22:59,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:22:59,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:59,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:22:59,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:59,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:22:59,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:22:59,932 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#B#compaction#405 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:22:59,933 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/c490de17848d4ad8b4248c891788a226 is 50, key is test_row_0/B:col10/1732116179248/Put/seqid=0 2024-11-20T15:22:59,940 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:59,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120adae2a6faa6147b2ba08285235495d32_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116179387/Put/seqid=0 2024-11-20T15:22:59,955 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411206015f5d1b2634cc9ae191c3ce57c82ac_3c7be1c0ab4594b9826cba013e95ae89 store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:59,957 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411206015f5d1b2634cc9ae191c3ce57c82ac_3c7be1c0ab4594b9826cba013e95ae89, store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:22:59,957 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206015f5d1b2634cc9ae191c3ce57c82ac_3c7be1c0ab4594b9826cba013e95ae89 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:23:00,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742300_1476 (size=12454) 2024-11-20T15:23:00,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:00,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
as already flushing 2024-11-20T15:23:00,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:00,031 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120adae2a6faa6147b2ba08285235495d32_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120adae2a6faa6147b2ba08285235495d32_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:00,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/c252dbc5e92346fba705565bdc7f556a, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:23:00,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/c252dbc5e92346fba705565bdc7f556a is 175, key is test_row_0/A:col10/1732116179387/Put/seqid=0 2024-11-20T15:23:00,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742301_1477 (size=13017) 2024-11-20T15:23:00,045 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/c490de17848d4ad8b4248c891788a226 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c490de17848d4ad8b4248c891788a226 2024-11-20T15:23:00,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T15:23:00,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116240035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116240039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116240040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116240050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116240050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742302_1478 (size=4469) 2024-11-20T15:23:00,054 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/B of 3c7be1c0ab4594b9826cba013e95ae89 into c490de17848d4ad8b4248c891788a226(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:00,055 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:23:00,055 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/B, priority=13, startTime=1732116179903; duration=0sec 2024-11-20T15:23:00,055 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:00,055 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:B 2024-11-20T15:23:00,055 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:00,055 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#A#compaction#406 average throughput is 0.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:00,056 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/88de721f061b433287b0e2a88f7dbf66 is 175, key is test_row_0/A:col10/1732116179248/Put/seqid=0 2024-11-20T15:23:00,058 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:00,058 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/C is initiating minor compaction (all files) 2024-11-20T15:23:00,058 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/C in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:23:00,058 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b8950f349d55434293f1666a0c049efa, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/7699d197878341b1b40ca8c35f89c870, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/92c4e240225f42a08cd8454a9dced1ae] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=36.6 K 2024-11-20T15:23:00,058 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b8950f349d55434293f1666a0c049efa, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732116177271 2024-11-20T15:23:00,059 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 7699d197878341b1b40ca8c35f89c870, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732116177950 2024-11-20T15:23:00,059 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 92c4e240225f42a08cd8454a9dced1ae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732116179248 2024-11-20T15:23:00,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742303_1479 (size=31255) 2024-11-20T15:23:00,071 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=330, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/c252dbc5e92346fba705565bdc7f556a 2024-11-20T15:23:00,094 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#C#compaction#408 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:00,095 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/67272dc3f2254da1a4fcc8f1cf842efd is 50, key is test_row_0/C:col10/1732116179248/Put/seqid=0 2024-11-20T15:23:00,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/eb420d35a1164b97aee6ec8761a30514 is 50, key is test_row_0/B:col10/1732116179387/Put/seqid=0 2024-11-20T15:23:00,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742304_1480 (size=31971) 2024-11-20T15:23:00,134 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/88de721f061b433287b0e2a88f7dbf66 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/88de721f061b433287b0e2a88f7dbf66 2024-11-20T15:23:00,140 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/A of 3c7be1c0ab4594b9826cba013e95ae89 into 88de721f061b433287b0e2a88f7dbf66(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:00,140 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:23:00,140 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/A, priority=13, startTime=1732116179903; duration=0sec 2024-11-20T15:23:00,140 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:00,140 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:A 2024-11-20T15:23:00,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116240152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116240152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116240153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116240153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116240154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742305_1481 (size=13017) 2024-11-20T15:23:00,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742306_1482 (size=12301) 2024-11-20T15:23:00,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116240359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116240360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116240362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116240363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116240363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T15:23:00,566 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/67272dc3f2254da1a4fcc8f1cf842efd as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/67272dc3f2254da1a4fcc8f1cf842efd 2024-11-20T15:23:00,571 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/C of 3c7be1c0ab4594b9826cba013e95ae89 into 67272dc3f2254da1a4fcc8f1cf842efd(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:00,571 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:23:00,571 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/C, priority=13, startTime=1732116179903; duration=0sec 2024-11-20T15:23:00,571 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:00,571 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:C 2024-11-20T15:23:00,577 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/eb420d35a1164b97aee6ec8761a30514 2024-11-20T15:23:00,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/b5a08f3821034e08a297c72e67a20111 is 50, key is test_row_0/C:col10/1732116179387/Put/seqid=0 2024-11-20T15:23:00,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742307_1483 (size=12301) 2024-11-20T15:23:00,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116240661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116240665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116240668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116240671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:00,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:00,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116240671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,059 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/b5a08f3821034e08a297c72e67a20111 2024-11-20T15:23:01,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/c252dbc5e92346fba705565bdc7f556a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c252dbc5e92346fba705565bdc7f556a 2024-11-20T15:23:01,069 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c252dbc5e92346fba705565bdc7f556a, entries=150, sequenceid=330, filesize=30.5 K 2024-11-20T15:23:01,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/eb420d35a1164b97aee6ec8761a30514 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/eb420d35a1164b97aee6ec8761a30514 2024-11-20T15:23:01,074 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/eb420d35a1164b97aee6ec8761a30514, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T15:23:01,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/b5a08f3821034e08a297c72e67a20111 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b5a08f3821034e08a297c72e67a20111 2024-11-20T15:23:01,079 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b5a08f3821034e08a297c72e67a20111, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T15:23:01,080 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 3c7be1c0ab4594b9826cba013e95ae89 in 1173ms, sequenceid=330, compaction requested=false 2024-11-20T15:23:01,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:23:01,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:23:01,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-11-20T15:23:01,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-11-20T15:23:01,083 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-20T15:23:01,083 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6360 sec 2024-11-20T15:23:01,085 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 1.6420 sec 2024-11-20T15:23:01,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T15:23:01,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:01,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:23:01,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:01,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:23:01,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:01,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:23:01,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:01,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c39de9bdaa1e4b7c9967b7a955c73aac_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116181177/Put/seqid=0 2024-11-20T15:23:01,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116241254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116241256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116241257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116241264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116241267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742308_1484 (size=12454) 2024-11-20T15:23:01,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116241368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116241368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116241369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116241372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116241380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T15:23:01,549 INFO [Thread-1753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-11-20T15:23:01,551 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:01,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-11-20T15:23:01,552 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:01,553 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:01,553 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:01,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T15:23:01,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116241574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116241575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116241575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116241583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116241589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T15:23:01,705 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,705 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T15:23:01,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:01,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:23:01,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:01,706 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:01,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:01,707 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:01,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:01,711 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c39de9bdaa1e4b7c9967b7a955c73aac_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c39de9bdaa1e4b7c9967b7a955c73aac_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:01,714 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/d354c54af1714f939c61b51facf10b6a, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:23:01,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/d354c54af1714f939c61b51facf10b6a is 175, key is test_row_0/A:col10/1732116181177/Put/seqid=0 2024-11-20T15:23:01,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742309_1485 (size=31255) 2024-11-20T15:23:01,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T15:23:01,858 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T15:23:01,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:01,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:23:01,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:23:01,859 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:01,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:01,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:01,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116241885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116241885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116241887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116241892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:01,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:01,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116241894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:02,011 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:02,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T15:23:02,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:02,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:23:02,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:02,012 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:02,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:02,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:02,154 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=346, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/d354c54af1714f939c61b51facf10b6a 2024-11-20T15:23:02,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T15:23:02,164 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:02,167 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T15:23:02,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:02,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:23:02,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:23:02,169 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:02,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:02,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:02,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/c40e5672dde14c5b8bc1450ae4a89b13 is 50, key is test_row_0/B:col10/1732116181177/Put/seqid=0 2024-11-20T15:23:02,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742310_1486 (size=12301) 2024-11-20T15:23:02,209 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/c40e5672dde14c5b8bc1450ae4a89b13 2024-11-20T15:23:02,221 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/6f9b361eba484b6f80d82d96378da470 is 50, key is test_row_0/C:col10/1732116181177/Put/seqid=0 2024-11-20T15:23:02,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742311_1487 (size=12301) 2024-11-20T15:23:02,321 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:02,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T15:23:02,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:02,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:23:02,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:02,323 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:02,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:02,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:02,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:02,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55922 deadline: 1732116242393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:02,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:02,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55894 deadline: 1732116242394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:02,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:02,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55892 deadline: 1732116242397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:02,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:02,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55950 deadline: 1732116242398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:02,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:02,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55906 deadline: 1732116242412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:02,475 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:02,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T15:23:02,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:02,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:23:02,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:02,476 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:02,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:02,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:02,629 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:02,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T15:23:02,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:02,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:23:02,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:02,630 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:02,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:02,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:02,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T15:23:02,664 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/6f9b361eba484b6f80d82d96378da470 2024-11-20T15:23:02,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/d354c54af1714f939c61b51facf10b6a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/d354c54af1714f939c61b51facf10b6a 2024-11-20T15:23:02,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/d354c54af1714f939c61b51facf10b6a, entries=150, sequenceid=346, filesize=30.5 K 2024-11-20T15:23:02,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/c40e5672dde14c5b8bc1450ae4a89b13 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c40e5672dde14c5b8bc1450ae4a89b13 2024-11-20T15:23:02,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c40e5672dde14c5b8bc1450ae4a89b13, entries=150, sequenceid=346, filesize=12.0 K 2024-11-20T15:23:02,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/6f9b361eba484b6f80d82d96378da470 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/6f9b361eba484b6f80d82d96378da470 2024-11-20T15:23:02,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/6f9b361eba484b6f80d82d96378da470, entries=150, sequenceid=346, filesize=12.0 K 2024-11-20T15:23:02,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 3c7be1c0ab4594b9826cba013e95ae89 in 1512ms, sequenceid=346, compaction requested=true 2024-11-20T15:23:02,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:23:02,692 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:02,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:02,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:02,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:02,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:02,693 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:02,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c7be1c0ab4594b9826cba013e95ae89:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:02,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:02,693 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:02,694 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/A is initiating minor 
compaction (all files) 2024-11-20T15:23:02,694 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/A in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:02,694 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/88de721f061b433287b0e2a88f7dbf66, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c252dbc5e92346fba705565bdc7f556a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/d354c54af1714f939c61b51facf10b6a] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=92.3 K 2024-11-20T15:23:02,694 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:02,694 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/88de721f061b433287b0e2a88f7dbf66, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c252dbc5e92346fba705565bdc7f556a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/d354c54af1714f939c61b51facf10b6a] 2024-11-20T15:23:02,694 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:02,694 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/B is initiating minor compaction (all files) 2024-11-20T15:23:02,694 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/B in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:23:02,694 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c490de17848d4ad8b4248c891788a226, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/eb420d35a1164b97aee6ec8761a30514, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c40e5672dde14c5b8bc1450ae4a89b13] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=36.7 K 2024-11-20T15:23:02,695 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 88de721f061b433287b0e2a88f7dbf66, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732116179248 2024-11-20T15:23:02,695 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting c490de17848d4ad8b4248c891788a226, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732116179248 2024-11-20T15:23:02,695 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting c252dbc5e92346fba705565bdc7f556a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732116179387 2024-11-20T15:23:02,695 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting eb420d35a1164b97aee6ec8761a30514, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732116179387 2024-11-20T15:23:02,695 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d354c54af1714f939c61b51facf10b6a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1732116180048 2024-11-20T15:23:02,696 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting c40e5672dde14c5b8bc1450ae4a89b13, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1732116180048 2024-11-20T15:23:02,716 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:23:02,722 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#B#compaction#415 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:02,723 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/a78ae94f4a9a4417ae92dd57978770e4 is 50, key is test_row_0/B:col10/1732116181177/Put/seqid=0 2024-11-20T15:23:02,728 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120da27164f8abd4625861f4f37162e032b_3c7be1c0ab4594b9826cba013e95ae89 store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:23:02,730 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120da27164f8abd4625861f4f37162e032b_3c7be1c0ab4594b9826cba013e95ae89, store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:23:02,730 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120da27164f8abd4625861f4f37162e032b_3c7be1c0ab4594b9826cba013e95ae89 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:23:02,782 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:02,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T15:23:02,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:23:02,782 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T15:23:02,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:23:02,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:02,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:23:02,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:02,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:23:02,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:02,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742312_1488 (size=13119) 2024-11-20T15:23:02,798 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/a78ae94f4a9a4417ae92dd57978770e4 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/a78ae94f4a9a4417ae92dd57978770e4 2024-11-20T15:23:02,802 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/B of 3c7be1c0ab4594b9826cba013e95ae89 into a78ae94f4a9a4417ae92dd57978770e4(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
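The RegionTooBusyException warnings earlier in this run come from HRegion.checkResources(), which rejects writes once a region's memstore grows past the configured flush size multiplied by the block multiplier; the "Over memstore limit=512.0 K" figure above implies a deliberately small flush size for this test. Below is a minimal sketch of the two relevant settings; the 128 KB flush size is an assumption chosen only so that 128 KB x 4 matches the 512 K limit in the log, and the actual values used by AcidGuaranteesTestTool are not shown here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Regions flush once a memstore reaches this size (assumed 128 KB here).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore
    // reaches flush.size * block.multiplier, i.e. 512 KB with these values.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = "
        + conf.getLong("hbase.hregion.memstore.flush.size", 0)
          * conf.getInt("hbase.hregion.memstore.block.multiplier", 0)
        + " bytes");
  }
}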
2024-11-20T15:23:02,802 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:23:02,802 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/B, priority=13, startTime=1732116182693; duration=0sec 2024-11-20T15:23:02,803 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:02,803 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:B 2024-11-20T15:23:02,803 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:02,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742313_1489 (size=4469) 2024-11-20T15:23:02,804 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:02,804 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 3c7be1c0ab4594b9826cba013e95ae89/C is initiating minor compaction (all files) 2024-11-20T15:23:02,804 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c7be1c0ab4594b9826cba013e95ae89/C in TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
2024-11-20T15:23:02,804 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/67272dc3f2254da1a4fcc8f1cf842efd, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b5a08f3821034e08a297c72e67a20111, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/6f9b361eba484b6f80d82d96378da470] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp, totalSize=36.7 K 2024-11-20T15:23:02,805 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 67272dc3f2254da1a4fcc8f1cf842efd, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732116179248 2024-11-20T15:23:02,805 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b5a08f3821034e08a297c72e67a20111, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732116179387 2024-11-20T15:23:02,806 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f9b361eba484b6f80d82d96378da470, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1732116180048 2024-11-20T15:23:02,806 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#A#compaction#414 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:02,807 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/3eededf653854f0480497556ba7e6284 is 175, key is test_row_0/A:col10/1732116181177/Put/seqid=0 2024-11-20T15:23:02,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201c25de80db6540ab9d438169d76feb9f_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116181240/Put/seqid=0 2024-11-20T15:23:02,835 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c7be1c0ab4594b9826cba013e95ae89#C#compaction#417 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:02,835 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/f035ea01ebd146ee83ce2c808783322b is 50, key is test_row_0/C:col10/1732116181177/Put/seqid=0 2024-11-20T15:23:02,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742314_1490 (size=32073) 2024-11-20T15:23:02,857 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/3eededf653854f0480497556ba7e6284 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/3eededf653854f0480497556ba7e6284 2024-11-20T15:23:02,863 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/A of 3c7be1c0ab4594b9826cba013e95ae89 into 3eededf653854f0480497556ba7e6284(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:02,863 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:23:02,863 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/A, priority=13, startTime=1732116182692; duration=0sec 2024-11-20T15:23:02,863 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:02,863 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:A 2024-11-20T15:23:02,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742315_1491 (size=12454) 2024-11-20T15:23:02,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:02,892 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201c25de80db6540ab9d438169d76feb9f_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201c25de80db6540ab9d438169d76feb9f_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:02,892 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742316_1492 (size=13119) 2024-11-20T15:23:02,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/2fc105cf79a243058dfd5e1e9624c8d8, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:23:02,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/2fc105cf79a243058dfd5e1e9624c8d8 is 175, key is test_row_0/A:col10/1732116181240/Put/seqid=0 2024-11-20T15:23:02,900 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/f035ea01ebd146ee83ce2c808783322b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/f035ea01ebd146ee83ce2c808783322b 2024-11-20T15:23:02,905 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c7be1c0ab4594b9826cba013e95ae89/C of 3c7be1c0ab4594b9826cba013e95ae89 into f035ea01ebd146ee83ce2c808783322b(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
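The compactions above were queued by MemStoreFlusher.0 after the flush left each of the A, B and C stores with three eligible files. For comparison, the same kind of store compaction can also be requested explicitly through the client Admin API; the sketch below assumes a reachable cluster configured via hbase-site.xml on the classpath, with the table and family names taken from the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asynchronously queues a minor compaction of the A family, the same
      // CompactSplit work that the flusher requested in the log above.
      admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
    }
  }
}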
2024-11-20T15:23:02,905 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:23:02,905 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89., storeName=3c7be1c0ab4594b9826cba013e95ae89/C, priority=13, startTime=1732116182693; duration=0sec 2024-11-20T15:23:02,905 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:02,905 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c7be1c0ab4594b9826cba013e95ae89:C 2024-11-20T15:23:02,916 DEBUG [Thread-1754 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x319559be to 127.0.0.1:62338 2024-11-20T15:23:02,916 DEBUG [Thread-1754 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:02,919 DEBUG [Thread-1762 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x47679076 to 127.0.0.1:62338 2024-11-20T15:23:02,919 DEBUG [Thread-1762 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:02,920 DEBUG [Thread-1760 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7819b9e2 to 127.0.0.1:62338 2024-11-20T15:23:02,920 DEBUG [Thread-1760 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:02,921 DEBUG [Thread-1756 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c907e21 to 127.0.0.1:62338 2024-11-20T15:23:02,921 DEBUG [Thread-1756 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:02,924 DEBUG [Thread-1758 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61ec0f48 to 127.0.0.1:62338 2024-11-20T15:23:02,924 DEBUG [Thread-1758 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:02,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742317_1493 (size=31255) 2024-11-20T15:23:03,324 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=369, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/2fc105cf79a243058dfd5e1e9624c8d8 2024-11-20T15:23:03,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/e146ac1af3a949609c23882fc08c6d35 is 50, key is test_row_0/B:col10/1732116181240/Put/seqid=0 2024-11-20T15:23:03,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742318_1494 (size=12301) 2024-11-20T15:23:03,375 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=369 (bloomFilter=true), 
to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/e146ac1af3a949609c23882fc08c6d35 2024-11-20T15:23:03,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/36dc1d540596489ab183a5c76c8d2306 is 50, key is test_row_0/C:col10/1732116181240/Put/seqid=0 2024-11-20T15:23:03,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. as already flushing 2024-11-20T15:23:03,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:03,405 DEBUG [Thread-1751 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3875c8c5 to 127.0.0.1:62338 2024-11-20T15:23:03,405 DEBUG [Thread-1749 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c54a0d3 to 127.0.0.1:62338 2024-11-20T15:23:03,405 DEBUG [Thread-1751 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:03,405 DEBUG [Thread-1749 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:03,408 DEBUG [Thread-1743 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75b14fbd to 127.0.0.1:62338 2024-11-20T15:23:03,408 DEBUG [Thread-1743 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:03,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742319_1495 (size=12301) 2024-11-20T15:23:03,411 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/36dc1d540596489ab183a5c76c8d2306 2024-11-20T15:23:03,412 DEBUG [Thread-1745 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62f74604 to 127.0.0.1:62338 2024-11-20T15:23:03,412 DEBUG [Thread-1745 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:03,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/2fc105cf79a243058dfd5e1e9624c8d8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/2fc105cf79a243058dfd5e1e9624c8d8 2024-11-20T15:23:03,422 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/2fc105cf79a243058dfd5e1e9624c8d8, entries=150, sequenceid=369, filesize=30.5 K 2024-11-20T15:23:03,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/e146ac1af3a949609c23882fc08c6d35 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/e146ac1af3a949609c23882fc08c6d35 2024-11-20T15:23:03,426 DEBUG [Thread-1747 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x49e13594 to 127.0.0.1:62338 2024-11-20T15:23:03,426 DEBUG [Thread-1747 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:03,429 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/e146ac1af3a949609c23882fc08c6d35, entries=150, sequenceid=369, filesize=12.0 K 2024-11-20T15:23:03,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/36dc1d540596489ab183a5c76c8d2306 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/36dc1d540596489ab183a5c76c8d2306 2024-11-20T15:23:03,432 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/36dc1d540596489ab183a5c76c8d2306, entries=150, sequenceid=369, filesize=12.0 K 2024-11-20T15:23:03,433 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=33.54 KB/34350 for 3c7be1c0ab4594b9826cba013e95ae89 in 651ms, sequenceid=369, compaction requested=false 2024-11-20T15:23:03,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:23:03,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 
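The FlushRegionCallable activity for pid=127 above is the server-side half of an admin-requested table flush (pid=126, a FlushTableProcedure); the corresponding client call is reported a few lines below as "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed". A minimal client sketch that would start the same procedure, assuming a standard HBase 2.x client and cluster configuration on the classpath:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side FlushTableProcedure (and its per-region
      // FlushRegionProcedure children, like pid=127 above) has completed.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}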
2024-11-20T15:23:03,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-11-20T15:23:03,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-11-20T15:23:03,435 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-20T15:23:03,435 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8810 sec 2024-11-20T15:23:03,437 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 1.8850 sec 2024-11-20T15:23:03,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T15:23:03,658 INFO [Thread-1753 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 59 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 59 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1754 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5262 rows 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1753 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5259 rows 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1774 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5322 rows 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1754 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5262 rows 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1740 2024-11-20T15:23:03,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5220 rows 2024-11-20T15:23:03,659 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T15:23:03,659 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0bf5e2f0 to 127.0.0.1:62338 2024-11-20T15:23:03,659 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:03,664 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of 
TestAcidGuarantees 2024-11-20T15:23:03,665 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T15:23:03,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T15:23:03,668 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116183668"}]},"ts":"1732116183668"} 2024-11-20T15:23:03,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T15:23:03,672 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T15:23:03,675 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T15:23:03,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T15:23:03,677 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=130, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c7be1c0ab4594b9826cba013e95ae89, UNASSIGN}] 2024-11-20T15:23:03,678 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=130, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c7be1c0ab4594b9826cba013e95ae89, UNASSIGN 2024-11-20T15:23:03,679 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=130 updating hbase:meta row=3c7be1c0ab4594b9826cba013e95ae89, regionState=CLOSING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:23:03,680 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T15:23:03,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; CloseRegionProcedure 3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:23:03,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T15:23:03,831 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:03,832 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] handler.UnassignRegionHandler(124): Close 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:03,832 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T15:23:03,832 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1681): Closing 3c7be1c0ab4594b9826cba013e95ae89, disabling compactions & flushes 2024-11-20T15:23:03,832 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] 
regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:03,832 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:03,832 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. after waiting 0 ms 2024-11-20T15:23:03,832 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:03,833 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(2837): Flushing 3c7be1c0ab4594b9826cba013e95ae89 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T15:23:03,833 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=A 2024-11-20T15:23:03,833 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:03,833 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=B 2024-11-20T15:23:03,833 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:03,833 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c7be1c0ab4594b9826cba013e95ae89, store=C 2024-11-20T15:23:03,833 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:03,840 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120973ff36cfa7f49e89a451acdadeea17a_3c7be1c0ab4594b9826cba013e95ae89 is 50, key is test_row_0/A:col10/1732116183411/Put/seqid=0 2024-11-20T15:23:03,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742320_1496 (size=12454) 2024-11-20T15:23:03,859 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:03,864 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120973ff36cfa7f49e89a451acdadeea17a_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120973ff36cfa7f49e89a451acdadeea17a_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:03,865 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/c496c2b63ba8435e9ef4a34b4571ccd1, store: [table=TestAcidGuarantees family=A region=3c7be1c0ab4594b9826cba013e95ae89] 2024-11-20T15:23:03,866 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/c496c2b63ba8435e9ef4a34b4571ccd1 is 175, key is test_row_0/A:col10/1732116183411/Put/seqid=0 2024-11-20T15:23:03,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742321_1497 (size=31255) 2024-11-20T15:23:03,871 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=380, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/c496c2b63ba8435e9ef4a34b4571ccd1 2024-11-20T15:23:03,883 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/d325171ec3ad4d87a81bc4e62f97dea1 is 50, key is test_row_0/B:col10/1732116183411/Put/seqid=0 2024-11-20T15:23:03,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742322_1498 (size=12301) 2024-11-20T15:23:03,902 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/d325171ec3ad4d87a81bc4e62f97dea1 2024-11-20T15:23:03,908 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/68b1d16c28cf43c59dba416df3af4de5 is 50, key is test_row_0/C:col10/1732116183411/Put/seqid=0 2024-11-20T15:23:03,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742323_1499 (size=12301) 
2024-11-20T15:23:03,913 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/68b1d16c28cf43c59dba416df3af4de5 2024-11-20T15:23:03,917 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/A/c496c2b63ba8435e9ef4a34b4571ccd1 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c496c2b63ba8435e9ef4a34b4571ccd1 2024-11-20T15:23:03,920 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c496c2b63ba8435e9ef4a34b4571ccd1, entries=150, sequenceid=380, filesize=30.5 K 2024-11-20T15:23:03,921 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/B/d325171ec3ad4d87a81bc4e62f97dea1 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/d325171ec3ad4d87a81bc4e62f97dea1 2024-11-20T15:23:03,924 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/d325171ec3ad4d87a81bc4e62f97dea1, entries=150, sequenceid=380, filesize=12.0 K 2024-11-20T15:23:03,925 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/.tmp/C/68b1d16c28cf43c59dba416df3af4de5 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/68b1d16c28cf43c59dba416df3af4de5 2024-11-20T15:23:03,928 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/68b1d16c28cf43c59dba416df3af4de5, entries=150, sequenceid=380, filesize=12.0 K 2024-11-20T15:23:03,928 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 3c7be1c0ab4594b9826cba013e95ae89 in 96ms, sequenceid=380, compaction requested=true 
2024-11-20T15:23:03,929 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c8723bf1d09d4698a39f57d95fbc3ed5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/741d399152ee4f64908924faae14e2b0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/90d777cae7f443d59ba5feae3bdc068d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/5ba3e29a926c40b095401f36dd2d1436, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/0c43f708a3244a26afb00de05a7971fa, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/8365611f7a6e4147bb0baeaec77ad295, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/cc7ba80a95f24d6f91736e61d6e3e867, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/291cc92b77a042008d70912448733b57, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/0c9f59a0a0ad4e59a3e57346c6ec60dc, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/762d89b668624e26ad52a465cd474fc3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/4094299df5154c2e8318006ac4d05c3c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/d2c125513c2243ba9657966db967c6c9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/9fe93e31ad994eeaa1b99ffff5041836, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/4ce0b484c2274d57a233a0efe707552b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/466e38e368ab403e9d477a4345e96626, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/fdf0f39cb56044fcafb86c14fa7273e6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/e600177b17c8423e90fde2eb56b343ac, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/8eef99f40044434a90d90e15284e8004, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/853acd2be98c46d6838b0f6284ce9bcd, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/f3e2783087464ea9ad64502f1fbaeee9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/e5144331b7664e27829a3874a35c79e0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/88de721f061b433287b0e2a88f7dbf66, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c252dbc5e92346fba705565bdc7f556a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/d354c54af1714f939c61b51facf10b6a] to archive 2024-11-20T15:23:03,930 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T15:23:03,931 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c8723bf1d09d4698a39f57d95fbc3ed5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c8723bf1d09d4698a39f57d95fbc3ed5 2024-11-20T15:23:03,932 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/741d399152ee4f64908924faae14e2b0 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/741d399152ee4f64908924faae14e2b0 2024-11-20T15:23:03,933 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/90d777cae7f443d59ba5feae3bdc068d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/90d777cae7f443d59ba5feae3bdc068d 2024-11-20T15:23:03,934 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/5ba3e29a926c40b095401f36dd2d1436 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/5ba3e29a926c40b095401f36dd2d1436 2024-11-20T15:23:03,935 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/0c43f708a3244a26afb00de05a7971fa to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/0c43f708a3244a26afb00de05a7971fa 2024-11-20T15:23:03,936 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/8365611f7a6e4147bb0baeaec77ad295 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/8365611f7a6e4147bb0baeaec77ad295 2024-11-20T15:23:03,937 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/cc7ba80a95f24d6f91736e61d6e3e867 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/cc7ba80a95f24d6f91736e61d6e3e867 2024-11-20T15:23:03,938 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/291cc92b77a042008d70912448733b57 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/291cc92b77a042008d70912448733b57 2024-11-20T15:23:03,939 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/0c9f59a0a0ad4e59a3e57346c6ec60dc to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/0c9f59a0a0ad4e59a3e57346c6ec60dc 2024-11-20T15:23:03,940 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/762d89b668624e26ad52a465cd474fc3 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/762d89b668624e26ad52a465cd474fc3 2024-11-20T15:23:03,941 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/4094299df5154c2e8318006ac4d05c3c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/4094299df5154c2e8318006ac4d05c3c 2024-11-20T15:23:03,942 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/d2c125513c2243ba9657966db967c6c9 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/d2c125513c2243ba9657966db967c6c9 2024-11-20T15:23:03,943 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/9fe93e31ad994eeaa1b99ffff5041836 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/9fe93e31ad994eeaa1b99ffff5041836 2024-11-20T15:23:03,944 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/4ce0b484c2274d57a233a0efe707552b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/4ce0b484c2274d57a233a0efe707552b 2024-11-20T15:23:03,945 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/466e38e368ab403e9d477a4345e96626 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/466e38e368ab403e9d477a4345e96626 2024-11-20T15:23:03,946 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/fdf0f39cb56044fcafb86c14fa7273e6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/fdf0f39cb56044fcafb86c14fa7273e6 2024-11-20T15:23:03,947 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/e600177b17c8423e90fde2eb56b343ac to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/e600177b17c8423e90fde2eb56b343ac 2024-11-20T15:23:03,948 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/8eef99f40044434a90d90e15284e8004 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/8eef99f40044434a90d90e15284e8004 2024-11-20T15:23:03,949 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/853acd2be98c46d6838b0f6284ce9bcd to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/853acd2be98c46d6838b0f6284ce9bcd 2024-11-20T15:23:03,951 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/f3e2783087464ea9ad64502f1fbaeee9 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/f3e2783087464ea9ad64502f1fbaeee9 2024-11-20T15:23:03,952 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/e5144331b7664e27829a3874a35c79e0 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/e5144331b7664e27829a3874a35c79e0 2024-11-20T15:23:03,953 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/88de721f061b433287b0e2a88f7dbf66 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/88de721f061b433287b0e2a88f7dbf66 2024-11-20T15:23:03,954 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c252dbc5e92346fba705565bdc7f556a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c252dbc5e92346fba705565bdc7f556a 2024-11-20T15:23:03,955 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/d354c54af1714f939c61b51facf10b6a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/d354c54af1714f939c61b51facf10b6a 2024-11-20T15:23:03,957 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/ffccafe111234a9c8778021c9a9fe137, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/00022e7fc4a94c4db59bf7d19e5a8c63, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/29c098925c6d44ddba76604907fa55c0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/ccc7875f0b744651a44d0c049f39e137, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/43675b55b77e43b0939af50b6612bf6f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/93a2198f36d7470eb32c245eaac29c96, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c1a4d50b1aeb443f8af82a9420a886e1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2b07c6d5ca554cccac3f5cec9544da39, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/22892832d1884071a668b9bd1520220a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/13196a89aa1f426db4c153b3df1f78bc, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/5b82cdc84caf4f75b21d57da2e3e4e82, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2341df51bb5b4431bdde11feb0d3a00e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c1391986f6d247bca17deaf1716b6190, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/b64d95fc54834ef98e594f0c00e923f7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2c55c4ae9df74483bf715b7840282919, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/38a648fe9d9044b180ef1f9144ddde65, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/f7d5542750af4512ab70c1b271aba1fd, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/240fb8ffe404407197a1ce9ff0bab67f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c6a3530125d945fca08ddbcf1e7ef52d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/49dfbf141918458b96e5e1bc667b3aab, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c490de17848d4ad8b4248c891788a226, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/095fafe913504f599d79b6507bb05176, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/eb420d35a1164b97aee6ec8761a30514, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c40e5672dde14c5b8bc1450ae4a89b13] to archive 2024-11-20T15:23:03,957 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T15:23:03,959 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/ffccafe111234a9c8778021c9a9fe137 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/ffccafe111234a9c8778021c9a9fe137 2024-11-20T15:23:03,960 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/00022e7fc4a94c4db59bf7d19e5a8c63 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/00022e7fc4a94c4db59bf7d19e5a8c63 2024-11-20T15:23:03,961 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/29c098925c6d44ddba76604907fa55c0 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/29c098925c6d44ddba76604907fa55c0 2024-11-20T15:23:03,962 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/ccc7875f0b744651a44d0c049f39e137 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/ccc7875f0b744651a44d0c049f39e137 2024-11-20T15:23:03,963 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/43675b55b77e43b0939af50b6612bf6f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/43675b55b77e43b0939af50b6612bf6f 2024-11-20T15:23:03,965 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/93a2198f36d7470eb32c245eaac29c96 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/93a2198f36d7470eb32c245eaac29c96 2024-11-20T15:23:03,966 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c1a4d50b1aeb443f8af82a9420a886e1 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c1a4d50b1aeb443f8af82a9420a886e1 2024-11-20T15:23:03,967 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2b07c6d5ca554cccac3f5cec9544da39 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2b07c6d5ca554cccac3f5cec9544da39 2024-11-20T15:23:03,968 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/22892832d1884071a668b9bd1520220a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/22892832d1884071a668b9bd1520220a 2024-11-20T15:23:03,969 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/13196a89aa1f426db4c153b3df1f78bc to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/13196a89aa1f426db4c153b3df1f78bc 2024-11-20T15:23:03,970 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/5b82cdc84caf4f75b21d57da2e3e4e82 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/5b82cdc84caf4f75b21d57da2e3e4e82 2024-11-20T15:23:03,970 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2341df51bb5b4431bdde11feb0d3a00e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2341df51bb5b4431bdde11feb0d3a00e 2024-11-20T15:23:03,971 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c1391986f6d247bca17deaf1716b6190 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c1391986f6d247bca17deaf1716b6190 2024-11-20T15:23:03,972 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/b64d95fc54834ef98e594f0c00e923f7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/b64d95fc54834ef98e594f0c00e923f7 2024-11-20T15:23:03,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T15:23:03,973 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2c55c4ae9df74483bf715b7840282919 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/2c55c4ae9df74483bf715b7840282919 2024-11-20T15:23:03,974 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/38a648fe9d9044b180ef1f9144ddde65 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/38a648fe9d9044b180ef1f9144ddde65 2024-11-20T15:23:03,975 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/f7d5542750af4512ab70c1b271aba1fd to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/f7d5542750af4512ab70c1b271aba1fd 2024-11-20T15:23:03,977 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/240fb8ffe404407197a1ce9ff0bab67f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/240fb8ffe404407197a1ce9ff0bab67f 2024-11-20T15:23:03,978 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c6a3530125d945fca08ddbcf1e7ef52d to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c6a3530125d945fca08ddbcf1e7ef52d 2024-11-20T15:23:03,979 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/49dfbf141918458b96e5e1bc667b3aab to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/49dfbf141918458b96e5e1bc667b3aab 2024-11-20T15:23:03,980 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c490de17848d4ad8b4248c891788a226 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c490de17848d4ad8b4248c891788a226 2024-11-20T15:23:03,982 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/095fafe913504f599d79b6507bb05176 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/095fafe913504f599d79b6507bb05176 2024-11-20T15:23:03,983 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/eb420d35a1164b97aee6ec8761a30514 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/eb420d35a1164b97aee6ec8761a30514 2024-11-20T15:23:03,984 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c40e5672dde14c5b8bc1450ae4a89b13 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/c40e5672dde14c5b8bc1450ae4a89b13 2024-11-20T15:23:03,985 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/223eeef8a46d4caf8567879c7c344aa7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/bea896cc5d0a458c92be00710e859795, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/d802e3268aee4bbb8b14819c7a0ffb79, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b7ef62641f26496f98a05027cc4b5eaa, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/81ce6b4068ba4c318b8d06441a7d001b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/1305cd59f41b4faba7c452733c6fdcff, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/16029e80807b4772a7177353ce88a38b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/9cab56a1c19f43978eb8adc499d73586, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/1689f150a3324020af01337c8405d480, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/8db99bf7b7624a4ebf4c06813b7413c8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/53921b72a99349e19b6e92be10d13498, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/c089c4edf4ed453ca0e7f5bce963a717, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/112aab5035c24982abbc26153d6e2d7b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/01a480b701ec4d719aff248b2117f507, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/50de2c16dc164f95964f74a3efdb3a6f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/3bc0c44987204c27b65fd38d9007cb83, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/2c3baf3b04d342edb2abc79303691b47, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b8950f349d55434293f1666a0c049efa, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/d4d498f2d42942bbb6455e22b2678f31, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/7699d197878341b1b40ca8c35f89c870, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/67272dc3f2254da1a4fcc8f1cf842efd, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/92c4e240225f42a08cd8454a9dced1ae, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b5a08f3821034e08a297c72e67a20111, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/6f9b361eba484b6f80d82d96378da470] to archive 2024-11-20T15:23:03,987 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T15:23:03,988 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/223eeef8a46d4caf8567879c7c344aa7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/223eeef8a46d4caf8567879c7c344aa7 2024-11-20T15:23:03,989 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/bea896cc5d0a458c92be00710e859795 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/bea896cc5d0a458c92be00710e859795 2024-11-20T15:23:03,990 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/d802e3268aee4bbb8b14819c7a0ffb79 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/d802e3268aee4bbb8b14819c7a0ffb79 2024-11-20T15:23:03,991 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b7ef62641f26496f98a05027cc4b5eaa to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b7ef62641f26496f98a05027cc4b5eaa 2024-11-20T15:23:03,992 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/81ce6b4068ba4c318b8d06441a7d001b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/81ce6b4068ba4c318b8d06441a7d001b 2024-11-20T15:23:03,994 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/1305cd59f41b4faba7c452733c6fdcff to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/1305cd59f41b4faba7c452733c6fdcff 2024-11-20T15:23:03,995 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/16029e80807b4772a7177353ce88a38b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/16029e80807b4772a7177353ce88a38b 2024-11-20T15:23:03,997 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/9cab56a1c19f43978eb8adc499d73586 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/9cab56a1c19f43978eb8adc499d73586 2024-11-20T15:23:03,999 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/1689f150a3324020af01337c8405d480 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/1689f150a3324020af01337c8405d480 2024-11-20T15:23:03,999 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/8db99bf7b7624a4ebf4c06813b7413c8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/8db99bf7b7624a4ebf4c06813b7413c8 2024-11-20T15:23:04,000 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/53921b72a99349e19b6e92be10d13498 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/53921b72a99349e19b6e92be10d13498 2024-11-20T15:23:04,001 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/c089c4edf4ed453ca0e7f5bce963a717 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/c089c4edf4ed453ca0e7f5bce963a717 2024-11-20T15:23:04,002 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/112aab5035c24982abbc26153d6e2d7b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/112aab5035c24982abbc26153d6e2d7b 2024-11-20T15:23:04,003 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/01a480b701ec4d719aff248b2117f507 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/01a480b701ec4d719aff248b2117f507 2024-11-20T15:23:04,004 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/50de2c16dc164f95964f74a3efdb3a6f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/50de2c16dc164f95964f74a3efdb3a6f 2024-11-20T15:23:04,006 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/3bc0c44987204c27b65fd38d9007cb83 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/3bc0c44987204c27b65fd38d9007cb83 2024-11-20T15:23:04,007 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/2c3baf3b04d342edb2abc79303691b47 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/2c3baf3b04d342edb2abc79303691b47 2024-11-20T15:23:04,008 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b8950f349d55434293f1666a0c049efa to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b8950f349d55434293f1666a0c049efa 2024-11-20T15:23:04,009 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/d4d498f2d42942bbb6455e22b2678f31 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/d4d498f2d42942bbb6455e22b2678f31 2024-11-20T15:23:04,010 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/7699d197878341b1b40ca8c35f89c870 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/7699d197878341b1b40ca8c35f89c870 2024-11-20T15:23:04,011 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/67272dc3f2254da1a4fcc8f1cf842efd to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/67272dc3f2254da1a4fcc8f1cf842efd 2024-11-20T15:23:04,012 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/92c4e240225f42a08cd8454a9dced1ae to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/92c4e240225f42a08cd8454a9dced1ae 2024-11-20T15:23:04,013 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b5a08f3821034e08a297c72e67a20111 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/b5a08f3821034e08a297c72e67a20111 2024-11-20T15:23:04,016 DEBUG [StoreCloser-TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/6f9b361eba484b6f80d82d96378da470 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/6f9b361eba484b6f80d82d96378da470 2024-11-20T15:23:04,029 DEBUG 
[RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/recovered.edits/383.seqid, newMaxSeqId=383, maxSeqId=4 2024-11-20T15:23:04,030 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89. 2024-11-20T15:23:04,030 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] regionserver.HRegion(1635): Region close journal for 3c7be1c0ab4594b9826cba013e95ae89: 2024-11-20T15:23:04,031 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=131}] handler.UnassignRegionHandler(170): Closed 3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,032 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=130 updating hbase:meta row=3c7be1c0ab4594b9826cba013e95ae89, regionState=CLOSED 2024-11-20T15:23:04,034 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-20T15:23:04,034 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; CloseRegionProcedure 3c7be1c0ab4594b9826cba013e95ae89, server=0b62285ead89,33387,1732116069954 in 353 msec 2024-11-20T15:23:04,035 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=130, resume processing ppid=129 2024-11-20T15:23:04,035 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, ppid=129, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c7be1c0ab4594b9826cba013e95ae89, UNASSIGN in 357 msec 2024-11-20T15:23:04,037 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-20T15:23:04,037 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 361 msec 2024-11-20T15:23:04,038 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116184038"}]},"ts":"1732116184038"} 2024-11-20T15:23:04,039 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T15:23:04,041 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T15:23:04,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 377 msec 2024-11-20T15:23:04,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T15:23:04,274 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-20T15:23:04,275 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T15:23:04,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, 
state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:23:04,276 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=132, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:23:04,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T15:23:04,277 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=132, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:23:04,281 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,285 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/recovered.edits] 2024-11-20T15:23:04,289 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/2fc105cf79a243058dfd5e1e9624c8d8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/2fc105cf79a243058dfd5e1e9624c8d8 2024-11-20T15:23:04,301 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/3eededf653854f0480497556ba7e6284 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/3eededf653854f0480497556ba7e6284 2024-11-20T15:23:04,304 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c496c2b63ba8435e9ef4a34b4571ccd1 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/A/c496c2b63ba8435e9ef4a34b4571ccd1 2024-11-20T15:23:04,308 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/a78ae94f4a9a4417ae92dd57978770e4 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/a78ae94f4a9a4417ae92dd57978770e4 2024-11-20T15:23:04,309 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/d325171ec3ad4d87a81bc4e62f97dea1 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/d325171ec3ad4d87a81bc4e62f97dea1 2024-11-20T15:23:04,311 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/e146ac1af3a949609c23882fc08c6d35 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/B/e146ac1af3a949609c23882fc08c6d35 2024-11-20T15:23:04,313 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/36dc1d540596489ab183a5c76c8d2306 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/36dc1d540596489ab183a5c76c8d2306 2024-11-20T15:23:04,315 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/68b1d16c28cf43c59dba416df3af4de5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/68b1d16c28cf43c59dba416df3af4de5 2024-11-20T15:23:04,316 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/f035ea01ebd146ee83ce2c808783322b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/C/f035ea01ebd146ee83ce2c808783322b 2024-11-20T15:23:04,319 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/recovered.edits/383.seqid to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89/recovered.edits/383.seqid 2024-11-20T15:23:04,320 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,320 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T15:23:04,321 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T15:23:04,322 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T15:23:04,326 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120003b95873d0442fb9a1be6125dd97cf5_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120003b95873d0442fb9a1be6125dd97cf5_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,328 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120175935bcd8f04f55ba2ed6dfd8bd1e3f_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120175935bcd8f04f55ba2ed6dfd8bd1e3f_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,330 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201c25de80db6540ab9d438169d76feb9f_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201c25de80db6540ab9d438169d76feb9f_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,333 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202af79b6e1eac4c5ea7e446540d066aad_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202af79b6e1eac4c5ea7e446540d066aad_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,339 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204f81f23af9df43828ff0b0c003b4009a_3c7be1c0ab4594b9826cba013e95ae89 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204f81f23af9df43828ff0b0c003b4009a_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,340 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120608c323bb9b54e3fbc0d6ce7f15f632f_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120608c323bb9b54e3fbc0d6ce7f15f632f_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,343 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112066f365dfafc6481d88f76a7675dfda72_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112066f365dfafc6481d88f76a7675dfda72_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,345 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112082e5bee926b346f58ce5a8db6f5ac28c_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112082e5bee926b346f58ce5a8db6f5ac28c_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,346 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209683b72655ef400bbce929d71a663fb0_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209683b72655ef400bbce929d71a663fb0_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,348 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120973ff36cfa7f49e89a451acdadeea17a_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120973ff36cfa7f49e89a451acdadeea17a_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,351 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112099c00aea3d254410a0ffde7b6817afdf_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112099c00aea3d254410a0ffde7b6817afdf_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,353 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209a424e2e97fb4449ad5941ae9d55f77b_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209a424e2e97fb4449ad5941ae9d55f77b_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,356 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a025f0e87d494fb38d500f2b05f5efad_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a025f0e87d494fb38d500f2b05f5efad_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,357 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120adae2a6faa6147b2ba08285235495d32_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120adae2a6faa6147b2ba08285235495d32_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,359 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b0c1b4a28df84bb8957f25a7bb34e410_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b0c1b4a28df84bb8957f25a7bb34e410_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,361 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b2579c8f8f114b6ba0d0ebd802c2d243_3c7be1c0ab4594b9826cba013e95ae89 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b2579c8f8f114b6ba0d0ebd802c2d243_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,362 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c0901f0fab6f43b89c5e7cc45feb7a12_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c0901f0fab6f43b89c5e7cc45feb7a12_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,365 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c39de9bdaa1e4b7c9967b7a955c73aac_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c39de9bdaa1e4b7c9967b7a955c73aac_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,367 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e643df1f1d89494eb929fc76a65e51b0_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e643df1f1d89494eb929fc76a65e51b0_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,369 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f97073ae1086402a9d08a55f0aad0bc8_3c7be1c0ab4594b9826cba013e95ae89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f97073ae1086402a9d08a55f0aad0bc8_3c7be1c0ab4594b9826cba013e95ae89 2024-11-20T15:23:04,370 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T15:23:04,373 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=132, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:23:04,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T15:23:04,385 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of 
TestAcidGuarantees from hbase:meta 2024-11-20T15:23:04,388 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T15:23:04,389 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=132, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:23:04,389 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T15:23:04,390 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732116184389"}]},"ts":"9223372036854775807"} 2024-11-20T15:23:04,397 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T15:23:04,397 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3c7be1c0ab4594b9826cba013e95ae89, NAME => 'TestAcidGuarantees,,1732116160083.3c7be1c0ab4594b9826cba013e95ae89.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T15:23:04,398 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T15:23:04,398 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732116184398"}]},"ts":"9223372036854775807"} 2024-11-20T15:23:04,406 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T15:23:04,409 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=132, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:23:04,410 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 134 msec 2024-11-20T15:23:04,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T15:23:04,579 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-20T15:23:04,590 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=241 (was 239) - Thread LEAK? -, OpenFileDescriptor=461 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=683 (was 565) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5596 (was 5771) 2024-11-20T15:23:04,600 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=241, OpenFileDescriptor=461, MaxFileDescriptor=1048576, SystemLoadAverage=683, ProcessCount=11, AvailableMemoryMB=5595 2024-11-20T15:23:04,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
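A note on the two client lines just above: "Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 128 completed" and "Operation: DELETE, ... procId: 132 completed" are the client-visible side of the DisableTableProcedure / DeleteTableProcedure chains traced in this log. A minimal client-side sketch that would drive the same sequence is shown below; it assumes an HBase 2.x client classpath and an hbase-site.xml pointing at this mini-cluster, and the class name is illustrative rather than taken from the test source.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropAcidTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();          // picks up hbase-site.xml from the classpath
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          if (admin.tableExists(table)) {
            if (admin.isTableEnabled(table)) {
              admin.disableTable(table);   // blocks until the master's DisableTableProcedure finishes
            }
            admin.deleteTable(table);      // blocks until DeleteTableProcedure has archived the region
          }                                // directories and removed the table's rows from hbase:meta
        }
      }
    }

The recurring "master.MasterRpcServices(1305): Checking to see if procedure is done pid=..." lines are the master answering the polling that these blocking Admin calls perform while they wait for the corresponding procedure to complete.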
2024-11-20T15:23:04,602 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T15:23:04,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=133, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T15:23:04,603 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T15:23:04,604 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:04,604 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 133 2024-11-20T15:23:04,604 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T15:23:04,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-11-20T15:23:04,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742324_1500 (size=960) 2024-11-20T15:23:04,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-11-20T15:23:04,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-11-20T15:23:05,011 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3 2024-11-20T15:23:05,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742325_1501 (size=53) 2024-11-20T15:23:05,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-11-20T15:23:05,418 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:23:05,418 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 558f4ab8fec4b16865ece4459134acf9, disabling compactions & flushes 2024-11-20T15:23:05,418 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:05,418 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:05,418 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. after waiting 0 ms 2024-11-20T15:23:05,418 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:05,418 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
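The create request logged at 15:23:04,602 spells out the full table descriptor: three column families A, B and C with VERSIONS => '1', plus the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. The test builds this through its own utility code; the following is only a hedged sketch of an equivalent descriptor using the public HBase 2.x client API (the helper method name is made up, and the Admin handle is the one opened in the earlier sketch).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    /** Builds a descriptor equivalent to the one printed in the log and creates the table. */
    static void createAcidTable(Admin admin) throws java.io.IOException {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // TABLE_ATTRIBUTES => METADATA in the log: ask for the BASIC compacting memstore
          .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)              // VERSIONS => '1' in the logged descriptor
            .build());
      }
      admin.createTable(table.build());     // returns once the CreateTableProcedure (pid=133 here) finishes
    }

The remaining attributes in the logged descriptor (BLOOMFILTER => 'ROW', BLOCKSIZE => '65536', COMPRESSION => 'NONE', and so on) are the stock column-family defaults printed back, so the sketch leaves them implicit; a per-family route to the same memstore behaviour, not used by this test, would be ColumnFamilyDescriptorBuilder.setInMemoryCompaction(MemoryCompactionPolicy.BASIC).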
2024-11-20T15:23:05,418 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:05,420 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T15:23:05,420 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732116185420"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732116185420"}]},"ts":"1732116185420"} 2024-11-20T15:23:05,424 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T15:23:05,425 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T15:23:05,425 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116185425"}]},"ts":"1732116185425"} 2024-11-20T15:23:05,434 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T15:23:05,439 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=134, ppid=133, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=558f4ab8fec4b16865ece4459134acf9, ASSIGN}] 2024-11-20T15:23:05,440 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=134, ppid=133, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=558f4ab8fec4b16865ece4459134acf9, ASSIGN 2024-11-20T15:23:05,441 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=134, ppid=133, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=558f4ab8fec4b16865ece4459134acf9, ASSIGN; state=OFFLINE, location=0b62285ead89,33387,1732116069954; forceNewPlan=false, retain=false 2024-11-20T15:23:05,591 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=134 updating hbase:meta row=558f4ab8fec4b16865ece4459134acf9, regionState=OPENING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:23:05,593 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; OpenRegionProcedure 558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:23:05,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-11-20T15:23:05,744 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:05,747 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
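The assignment trace above ends with hbase:meta holding regionState=OPENING, regionLocation=0b62285ead89,33387,1732116069954 for the new region 558f4ab8fec4b16865ece4459134acf9. Clients resolve that same meta row when they locate the region; a small sketch of such a lookup with the public RegionLocator API follows (it reuses the Connection from the first sketch and is not taken from the test itself).

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    /** Looks up where hbase:meta currently places the (single) region of the test table. */
    static void printRegionLocation(Connection conn) throws java.io.IOException {
      try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
        // reload=true forces a fresh read of hbase:meta instead of the client-side cache
        HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true);
        System.out.println(loc.getRegion().getEncodedName() + " is hosted on " + loc.getServerName());
      }
    }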
2024-11-20T15:23:05,748 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7285): Opening region: {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} 2024-11-20T15:23:05,748 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:05,748 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:23:05,748 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7327): checking encryption for 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:05,748 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7330): checking classloading for 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:05,764 INFO [StoreOpener-558f4ab8fec4b16865ece4459134acf9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:05,766 INFO [StoreOpener-558f4ab8fec4b16865ece4459134acf9-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:23:05,766 INFO [StoreOpener-558f4ab8fec4b16865ece4459134acf9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 558f4ab8fec4b16865ece4459134acf9 columnFamilyName A 2024-11-20T15:23:05,766 DEBUG [StoreOpener-558f4ab8fec4b16865ece4459134acf9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:05,767 INFO [StoreOpener-558f4ab8fec4b16865ece4459134acf9-1 {}] regionserver.HStore(327): Store=558f4ab8fec4b16865ece4459134acf9/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:23:05,767 INFO [StoreOpener-558f4ab8fec4b16865ece4459134acf9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:05,769 INFO [StoreOpener-558f4ab8fec4b16865ece4459134acf9-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:23:05,769 INFO [StoreOpener-558f4ab8fec4b16865ece4459134acf9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 558f4ab8fec4b16865ece4459134acf9 columnFamilyName B 2024-11-20T15:23:05,770 DEBUG [StoreOpener-558f4ab8fec4b16865ece4459134acf9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:05,770 INFO [StoreOpener-558f4ab8fec4b16865ece4459134acf9-1 {}] regionserver.HStore(327): Store=558f4ab8fec4b16865ece4459134acf9/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:23:05,770 INFO [StoreOpener-558f4ab8fec4b16865ece4459134acf9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:05,772 INFO [StoreOpener-558f4ab8fec4b16865ece4459134acf9-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:23:05,773 INFO [StoreOpener-558f4ab8fec4b16865ece4459134acf9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 558f4ab8fec4b16865ece4459134acf9 columnFamilyName C 2024-11-20T15:23:05,773 DEBUG [StoreOpener-558f4ab8fec4b16865ece4459134acf9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:05,773 INFO [StoreOpener-558f4ab8fec4b16865ece4459134acf9-1 {}] regionserver.HStore(327): Store=558f4ab8fec4b16865ece4459134acf9/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:23:05,773 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:05,774 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:05,775 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:05,777 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T15:23:05,778 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1085): writing seq id for 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:05,786 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T15:23:05,787 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1102): Opened 558f4ab8fec4b16865ece4459134acf9; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71321854, jitterRate=0.06277844309806824}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T15:23:05,788 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1001): Region open journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:05,789 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., pid=135, masterSystemTime=1732116185744 2024-11-20T15:23:05,791 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:05,791 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
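With the region now open and its three CompactingMemStore-backed stores online, the suite's writer and reader threads begin exercising row atomicity. As a rough illustration of the kind of invariant being checked (row and qualifier names below are invented, and the real test drives dedicated writer, getter and scanner threads rather than a single call):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    /** Writes one value to all three families of a row, then checks the row reads back consistently. */
    static void checkRowAtomicity(Connection conn) throws java.io.IOException {
      byte[] row = Bytes.toBytes("test_row_0");   // illustrative names, not the suite's exact ones
      byte[] qual = Bytes.toBytes("col0");
      byte[] value = Bytes.toBytes(12345L);
      try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
        Put put = new Put(row);
        for (String family : new String[] {"A", "B", "C"}) {
          put.addColumn(Bytes.toBytes(family), qual, value);
        }
        table.put(put);                            // a single-row Put is applied atomically across families

        Result result = table.get(new Get(row));
        long a = Bytes.toLong(result.getValue(Bytes.toBytes("A"), qual));
        long b = Bytes.toLong(result.getValue(Bytes.toBytes("B"), qual));
        long c = Bytes.toLong(result.getValue(Bytes.toBytes("C"), qual));
        if (a != b || b != c) {
          throw new AssertionError("torn row: " + a + "/" + b + "/" + c);  // would indicate an atomicity violation
        }
      }
    }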
2024-11-20T15:23:05,791 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=134 updating hbase:meta row=558f4ab8fec4b16865ece4459134acf9, regionState=OPEN, openSeqNum=2, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:23:05,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-20T15:23:05,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; OpenRegionProcedure 558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 in 199 msec 2024-11-20T15:23:05,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=134, resume processing ppid=133 2024-11-20T15:23:05,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, ppid=133, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=558f4ab8fec4b16865ece4459134acf9, ASSIGN in 355 msec 2024-11-20T15:23:05,796 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T15:23:05,796 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116185796"}]},"ts":"1732116185796"} 2024-11-20T15:23:05,797 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T15:23:05,800 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=133, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T15:23:05,802 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1980 sec 2024-11-20T15:23:06,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=133 2024-11-20T15:23:06,709 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 133 completed 2024-11-20T15:23:06,711 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x63cefe40 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@32c12a30 2024-11-20T15:23:06,715 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79b10416, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:06,717 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:06,720 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41416, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:06,721 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T15:23:06,722 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53516, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T15:23:06,724 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65df2359 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5ef40578 2024-11-20T15:23:06,728 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f142b04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:06,729 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7d0ab200 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@32bb71c 2024-11-20T15:23:06,732 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@de9f076, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:06,733 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5871c039 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bc0f7c 2024-11-20T15:23:06,736 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4414259d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:06,737 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7daa5922 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b8b6e04 2024-11-20T15:23:06,740 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ed69825, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:06,741 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b7f20c4 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bc486e1 2024-11-20T15:23:06,745 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11193a0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:06,746 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2070263a to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7861b162 2024-11-20T15:23:06,750 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cf40102, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:06,751 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6050584c to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@154f0f85 2024-11-20T15:23:06,759 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@496fe03f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:06,760 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6dd48863 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8a917b 2024-11-20T15:23:06,764 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3652e74d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:06,764 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51196534 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@54c2725 2024-11-20T15:23:06,773 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2405c04e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:06,774 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1dc5e114 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79d49886 2024-11-20T15:23:06,785 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73d92042, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:06,804 DEBUG [hconnection-0x7708d45f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:06,804 DEBUG [hconnection-0x26459880-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:06,806 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41432, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:06,806 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41430, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:06,807 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:06,807 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-20T15:23:06,808 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:06,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T15:23:06,809 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:06,809 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:06,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:06,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:23:06,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:06,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:06,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:06,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:06,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:06,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:06,820 DEBUG [hconnection-0x239fc8f6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:06,821 DEBUG [hconnection-0x20204489-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:06,822 DEBUG [hconnection-0x65612578-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:06,822 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41446, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:06,822 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41462, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:06,822 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41464, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-20T15:23:06,835 DEBUG [hconnection-0x72dbdd0e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:06,836 DEBUG [hconnection-0x3784265c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:06,837 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41474, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:06,837 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41482, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:06,839 DEBUG [hconnection-0x43f0e0f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:06,841 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41486, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:06,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:06,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116246843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:06,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:06,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116246843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:06,844 DEBUG [hconnection-0x631c4d38-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:06,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:06,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116246845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:06,847 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/a21aeb7baff147ef9ea7d045f2f98a14 is 50, key is test_row_0/A:col10/1732116186812/Put/seqid=0 2024-11-20T15:23:06,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:06,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116246847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:06,847 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41496, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:06,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:06,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116246848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:06,851 DEBUG [hconnection-0x29816763-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:06,852 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41502, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:06,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742326_1502 (size=12001) 2024-11-20T15:23:06,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/a21aeb7baff147ef9ea7d045f2f98a14 2024-11-20T15:23:06,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T15:23:06,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/75a44a59c572433ca14e256d130cd4ff is 50, key is test_row_0/B:col10/1732116186812/Put/seqid=0 2024-11-20T15:23:06,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742327_1503 (size=12001) 2024-11-20T15:23:06,921 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/75a44a59c572433ca14e256d130cd4ff 2024-11-20T15:23:06,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:06,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116246945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:06,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:06,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116246945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:06,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:06,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116246947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:06,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:06,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116246948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:06,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:06,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116246950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:06,961 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:06,962 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T15:23:06,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:06,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:06,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:06,962 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:06,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:06,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:06,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/6d3292b4c1cc4bed904225a9ed32f47a is 50, key is test_row_0/C:col10/1732116186812/Put/seqid=0 2024-11-20T15:23:06,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742328_1504 (size=12001) 2024-11-20T15:23:06,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/6d3292b4c1cc4bed904225a9ed32f47a 2024-11-20T15:23:07,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/a21aeb7baff147ef9ea7d045f2f98a14 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a21aeb7baff147ef9ea7d045f2f98a14 2024-11-20T15:23:07,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a21aeb7baff147ef9ea7d045f2f98a14, entries=150, sequenceid=12, filesize=11.7 K 2024-11-20T15:23:07,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/75a44a59c572433ca14e256d130cd4ff as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/75a44a59c572433ca14e256d130cd4ff 2024-11-20T15:23:07,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/75a44a59c572433ca14e256d130cd4ff, entries=150, sequenceid=12, filesize=11.7 K 2024-11-20T15:23:07,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/6d3292b4c1cc4bed904225a9ed32f47a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6d3292b4c1cc4bed904225a9ed32f47a 2024-11-20T15:23:07,026 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6d3292b4c1cc4bed904225a9ed32f47a, entries=150, sequenceid=12, filesize=11.7 K 2024-11-20T15:23:07,027 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 558f4ab8fec4b16865ece4459134acf9 in 212ms, sequenceid=12, compaction 
requested=false 2024-11-20T15:23:07,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:07,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T15:23:07,114 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,115 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T15:23:07,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:07,115 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T15:23:07,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:07,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:07,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:07,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:07,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:07,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:07,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/cbbb15d8c3ca42278a5d345df508605b is 50, key is test_row_0/A:col10/1732116186841/Put/seqid=0 2024-11-20T15:23:07,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742329_1505 (size=12001) 2024-11-20T15:23:07,140 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/cbbb15d8c3ca42278a5d345df508605b 2024-11-20T15:23:07,151 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:07,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:07,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/0b00a44d371145779f736298771f49d7 is 50, key is test_row_0/B:col10/1732116186841/Put/seqid=0 2024-11-20T15:23:07,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116247173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116247173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116247173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742330_1506 (size=12001) 2024-11-20T15:23:07,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116247174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116247176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,187 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/0b00a44d371145779f736298771f49d7 2024-11-20T15:23:07,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/d4927b004a4d49acb63135826420ce02 is 50, key is test_row_0/C:col10/1732116186841/Put/seqid=0 2024-11-20T15:23:07,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742331_1507 (size=12001) 2024-11-20T15:23:07,219 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/d4927b004a4d49acb63135826420ce02 2024-11-20T15:23:07,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/cbbb15d8c3ca42278a5d345df508605b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/cbbb15d8c3ca42278a5d345df508605b 2024-11-20T15:23:07,241 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/cbbb15d8c3ca42278a5d345df508605b, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T15:23:07,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/0b00a44d371145779f736298771f49d7 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/0b00a44d371145779f736298771f49d7 2024-11-20T15:23:07,245 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/0b00a44d371145779f736298771f49d7, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T15:23:07,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/d4927b004a4d49acb63135826420ce02 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/d4927b004a4d49acb63135826420ce02 2024-11-20T15:23:07,252 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/d4927b004a4d49acb63135826420ce02, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T15:23:07,253 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 558f4ab8fec4b16865ece4459134acf9 in 138ms, sequenceid=37, compaction requested=false 2024-11-20T15:23:07,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:07,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:07,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-20T15:23:07,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-20T15:23:07,255 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-20T15:23:07,255 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 445 msec 2024-11-20T15:23:07,257 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 449 msec 2024-11-20T15:23:07,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:07,291 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T15:23:07,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:07,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:07,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:07,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:07,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:07,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:07,296 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/132220622be4447cb08d06f2460ae7d5 is 50, key is test_row_0/A:col10/1732116187289/Put/seqid=0 2024-11-20T15:23:07,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742332_1508 (size=12001) 2024-11-20T15:23:07,322 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/132220622be4447cb08d06f2460ae7d5 2024-11-20T15:23:07,332 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/ad1aff44c70445dbaafe987f813d3fa2 is 50, key is test_row_0/B:col10/1732116187289/Put/seqid=0 2024-11-20T15:23:07,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742333_1509 
(size=12001) 2024-11-20T15:23:07,359 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/ad1aff44c70445dbaafe987f813d3fa2 2024-11-20T15:23:07,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116247350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116247350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116247350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/1351ed3e93464d93be366999c242faa4 is 50, key is test_row_0/C:col10/1732116187289/Put/seqid=0 2024-11-20T15:23:07,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116247362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116247367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742334_1510 (size=12001) 2024-11-20T15:23:07,408 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/1351ed3e93464d93be366999c242faa4 2024-11-20T15:23:07,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T15:23:07,411 INFO [Thread-2222 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-20T15:23:07,413 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:07,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-20T15:23:07,415 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:07,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T15:23:07,416 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:07,416 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:07,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/132220622be4447cb08d06f2460ae7d5 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/132220622be4447cb08d06f2460ae7d5 2024-11-20T15:23:07,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/132220622be4447cb08d06f2460ae7d5, entries=150, sequenceid=49, filesize=11.7 K 2024-11-20T15:23:07,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/ad1aff44c70445dbaafe987f813d3fa2 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/ad1aff44c70445dbaafe987f813d3fa2 2024-11-20T15:23:07,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/ad1aff44c70445dbaafe987f813d3fa2, entries=150, sequenceid=49, filesize=11.7 K 2024-11-20T15:23:07,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/1351ed3e93464d93be366999c242faa4 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/1351ed3e93464d93be366999c242faa4 2024-11-20T15:23:07,441 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/1351ed3e93464d93be366999c242faa4, entries=150, sequenceid=49, filesize=11.7 K 2024-11-20T15:23:07,442 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 558f4ab8fec4b16865ece4459134acf9 in 151ms, sequenceid=49, compaction requested=true 2024-11-20T15:23:07,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:07,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:07,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:07,442 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:07,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:07,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:07,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:07,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:07,442 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:07,443 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:07,443 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/A is initiating minor compaction (all files) 2024-11-20T15:23:07,443 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/A in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:07,443 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a21aeb7baff147ef9ea7d045f2f98a14, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/cbbb15d8c3ca42278a5d345df508605b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/132220622be4447cb08d06f2460ae7d5] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=35.2 K 2024-11-20T15:23:07,444 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:07,444 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting a21aeb7baff147ef9ea7d045f2f98a14, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732116186812 2024-11-20T15:23:07,444 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/B is initiating minor compaction (all files) 2024-11-20T15:23:07,445 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/B in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:07,445 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/75a44a59c572433ca14e256d130cd4ff, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/0b00a44d371145779f736298771f49d7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/ad1aff44c70445dbaafe987f813d3fa2] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=35.2 K 2024-11-20T15:23:07,445 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbbb15d8c3ca42278a5d345df508605b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732116186836 2024-11-20T15:23:07,445 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 75a44a59c572433ca14e256d130cd4ff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732116186812 2024-11-20T15:23:07,445 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 132220622be4447cb08d06f2460ae7d5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732116187173 2024-11-20T15:23:07,445 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b00a44d371145779f736298771f49d7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732116186836 2024-11-20T15:23:07,446 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting ad1aff44c70445dbaafe987f813d3fa2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732116187173 2024-11-20T15:23:07,464 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#A#compaction#432 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:07,464 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/d957cb7b67b9421c854226e367ba72bd is 50, key is test_row_0/A:col10/1732116187289/Put/seqid=0 2024-11-20T15:23:07,465 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#B#compaction#433 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:07,465 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/343df17aeb7642ef9cf4b0d0a6f9cb0a is 50, key is test_row_0/B:col10/1732116187289/Put/seqid=0 2024-11-20T15:23:07,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:07,477 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T15:23:07,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:07,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:07,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:07,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:07,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:07,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:07,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116247487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742335_1511 (size=12104) 2024-11-20T15:23:07,506 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/9ae06e262b04464a9710ac3300060002 is 50, key is test_row_0/A:col10/1732116187349/Put/seqid=0 2024-11-20T15:23:07,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116247492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116247495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116247495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116247498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T15:23:07,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742336_1512 (size=12104) 2024-11-20T15:23:07,566 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/343df17aeb7642ef9cf4b0d0a6f9cb0a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/343df17aeb7642ef9cf4b0d0a6f9cb0a 2024-11-20T15:23:07,567 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T15:23:07,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:07,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:07,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:07,569 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:07,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:07,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:07,572 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/B of 558f4ab8fec4b16865ece4459134acf9 into 343df17aeb7642ef9cf4b0d0a6f9cb0a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:07,572 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:07,572 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/B, priority=13, startTime=1732116187442; duration=0sec 2024-11-20T15:23:07,572 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:07,573 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:B 2024-11-20T15:23:07,573 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:07,575 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:07,575 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/C is initiating minor compaction (all files) 2024-11-20T15:23:07,575 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/C in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:07,575 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6d3292b4c1cc4bed904225a9ed32f47a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/d4927b004a4d49acb63135826420ce02, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/1351ed3e93464d93be366999c242faa4] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=35.2 K 2024-11-20T15:23:07,577 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d3292b4c1cc4bed904225a9ed32f47a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732116186812 2024-11-20T15:23:07,578 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting d4927b004a4d49acb63135826420ce02, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732116186836 2024-11-20T15:23:07,578 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 1351ed3e93464d93be366999c242faa4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732116187173 2024-11-20T15:23:07,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is 
added to blk_1073742337_1513 (size=14341) 2024-11-20T15:23:07,595 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#C#compaction#435 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:07,596 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/6c314f7a1f9a4cb1ada4586ea6783b22 is 50, key is test_row_0/C:col10/1732116187289/Put/seqid=0 2024-11-20T15:23:07,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116247598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116247612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116247612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116247613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116247613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742338_1514 (size=12104) 2024-11-20T15:23:07,645 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/6c314f7a1f9a4cb1ada4586ea6783b22 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6c314f7a1f9a4cb1ada4586ea6783b22 2024-11-20T15:23:07,651 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/C of 558f4ab8fec4b16865ece4459134acf9 into 6c314f7a1f9a4cb1ada4586ea6783b22(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:07,651 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:07,651 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/C, priority=13, startTime=1732116187442; duration=0sec 2024-11-20T15:23:07,651 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:07,651 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:C 2024-11-20T15:23:07,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T15:23:07,722 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T15:23:07,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:07,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:07,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:07,723 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:07,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:07,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:07,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116247810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116247817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116247818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116247820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:07,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116247826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,874 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:07,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T15:23:07,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:07,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:07,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:07,875 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:07,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:07,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:07,904 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/d957cb7b67b9421c854226e367ba72bd as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d957cb7b67b9421c854226e367ba72bd 2024-11-20T15:23:07,916 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/A of 558f4ab8fec4b16865ece4459134acf9 into d957cb7b67b9421c854226e367ba72bd(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:07,916 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:07,916 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/A, priority=13, startTime=1732116187442; duration=0sec 2024-11-20T15:23:07,916 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:07,917 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:A 2024-11-20T15:23:07,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/9ae06e262b04464a9710ac3300060002 2024-11-20T15:23:08,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/27cb99f5aa15463badec3cdc882cfa21 is 50, key is test_row_0/B:col10/1732116187349/Put/seqid=0 2024-11-20T15:23:08,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T15:23:08,027 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,028 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T15:23:08,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:08,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:08,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:08,029 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:08,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:08,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:08,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742339_1515 (size=12001) 2024-11-20T15:23:08,079 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/27cb99f5aa15463badec3cdc882cfa21 2024-11-20T15:23:08,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/53c42a997651408d9d529fec9a50dde3 is 50, key is test_row_0/C:col10/1732116187349/Put/seqid=0 2024-11-20T15:23:08,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742340_1516 (size=12001) 2024-11-20T15:23:08,103 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/53c42a997651408d9d529fec9a50dde3 2024-11-20T15:23:08,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/9ae06e262b04464a9710ac3300060002 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9ae06e262b04464a9710ac3300060002 2024-11-20T15:23:08,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9ae06e262b04464a9710ac3300060002, entries=200, sequenceid=74, filesize=14.0 K 2024-11-20T15:23:08,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/27cb99f5aa15463badec3cdc882cfa21 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/27cb99f5aa15463badec3cdc882cfa21 2024-11-20T15:23:08,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116248118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/27cb99f5aa15463badec3cdc882cfa21, entries=150, sequenceid=74, filesize=11.7 K 2024-11-20T15:23:08,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/53c42a997651408d9d529fec9a50dde3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/53c42a997651408d9d529fec9a50dde3 2024-11-20T15:23:08,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116248125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,130 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/53c42a997651408d9d529fec9a50dde3, entries=150, sequenceid=74, filesize=11.7 K 2024-11-20T15:23:08,131 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 558f4ab8fec4b16865ece4459134acf9 in 655ms, sequenceid=74, compaction requested=false 2024-11-20T15:23:08,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:08,136 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T15:23:08,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:08,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:08,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:08,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:08,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:08,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:08,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/5c673977bd054255a12ef04ae0c37fa6 is 50, key is test_row_0/A:col10/1732116187491/Put/seqid=0 2024-11-20T15:23:08,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742341_1517 (size=12001) 2024-11-20T15:23:08,181 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T15:23:08,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:08,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:08,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:08,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:08,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:08,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:08,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116248192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116248197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116248200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116248302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116248307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116248307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,335 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T15:23:08,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:08,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:08,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:08,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:08,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:08,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:08,386 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T15:23:08,488 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T15:23:08,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:08,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:08,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:08,488 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:08,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:08,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:08,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116248505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T15:23:08,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116248515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116248515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,572 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/5c673977bd054255a12ef04ae0c37fa6 2024-11-20T15:23:08,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/1cd9d12da1cd496681f9fb19d48e947a is 50, key is test_row_0/B:col10/1732116187491/Put/seqid=0 2024-11-20T15:23:08,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742342_1518 (size=12001) 2024-11-20T15:23:08,621 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/1cd9d12da1cd496681f9fb19d48e947a 2024-11-20T15:23:08,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116248627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116248631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,640 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,640 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T15:23:08,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:08,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:08,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:08,641 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:08,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:08,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/983d84682dde40b1a63e78b127512f54 is 50, key is test_row_0/C:col10/1732116187491/Put/seqid=0 2024-11-20T15:23:08,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:08,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742343_1519 (size=12001) 2024-11-20T15:23:08,675 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/983d84682dde40b1a63e78b127512f54 2024-11-20T15:23:08,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/5c673977bd054255a12ef04ae0c37fa6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/5c673977bd054255a12ef04ae0c37fa6 2024-11-20T15:23:08,689 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/5c673977bd054255a12ef04ae0c37fa6, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T15:23:08,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/1cd9d12da1cd496681f9fb19d48e947a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/1cd9d12da1cd496681f9fb19d48e947a 2024-11-20T15:23:08,694 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/1cd9d12da1cd496681f9fb19d48e947a, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T15:23:08,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/983d84682dde40b1a63e78b127512f54 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/983d84682dde40b1a63e78b127512f54 2024-11-20T15:23:08,701 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/983d84682dde40b1a63e78b127512f54, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T15:23:08,703 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 558f4ab8fec4b16865ece4459134acf9 in 567ms, sequenceid=90, compaction requested=true 2024-11-20T15:23:08,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:08,703 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-11-20T15:23:08,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:08,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:08,703 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:08,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:08,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:08,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:08,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:08,707 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:08,707 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:08,707 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/A is initiating minor compaction (all files) 2024-11-20T15:23:08,707 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/B is initiating minor compaction (all files) 2024-11-20T15:23:08,707 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/A in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:08,707 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/B in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:08,707 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d957cb7b67b9421c854226e367ba72bd, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9ae06e262b04464a9710ac3300060002, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/5c673977bd054255a12ef04ae0c37fa6] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=37.5 K 2024-11-20T15:23:08,707 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/343df17aeb7642ef9cf4b0d0a6f9cb0a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/27cb99f5aa15463badec3cdc882cfa21, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/1cd9d12da1cd496681f9fb19d48e947a] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=35.3 K 2024-11-20T15:23:08,707 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d957cb7b67b9421c854226e367ba72bd, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732116187173 2024-11-20T15:23:08,707 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 343df17aeb7642ef9cf4b0d0a6f9cb0a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732116187173 2024-11-20T15:23:08,709 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 27cb99f5aa15463badec3cdc882cfa21, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732116187349 2024-11-20T15:23:08,709 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ae06e262b04464a9710ac3300060002, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732116187349 2024-11-20T15:23:08,710 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cd9d12da1cd496681f9fb19d48e947a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732116187491 2024-11-20T15:23:08,710 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c673977bd054255a12ef04ae0c37fa6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732116187491 2024-11-20T15:23:08,737 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#B#compaction#441 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:08,738 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/f510935396264b07bb93d117a15fd946 is 50, key is test_row_0/B:col10/1732116187491/Put/seqid=0 2024-11-20T15:23:08,746 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#A#compaction#442 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:08,746 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/18543d6f4d6a420cb076846a95a307fb is 50, key is test_row_0/A:col10/1732116187491/Put/seqid=0 2024-11-20T15:23:08,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742344_1520 (size=12207) 2024-11-20T15:23:08,787 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/f510935396264b07bb93d117a15fd946 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/f510935396264b07bb93d117a15fd946 2024-11-20T15:23:08,794 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,795 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T15:23:08,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:08,795 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T15:23:08,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:08,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:08,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:08,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:08,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:08,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:08,798 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/B of 558f4ab8fec4b16865ece4459134acf9 into f510935396264b07bb93d117a15fd946(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:08,798 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:08,798 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/B, priority=13, startTime=1732116188703; duration=0sec 2024-11-20T15:23:08,798 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:08,798 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:B 2024-11-20T15:23:08,798 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:08,799 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:08,799 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/C is initiating minor compaction (all files) 2024-11-20T15:23:08,799 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/C in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:08,799 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6c314f7a1f9a4cb1ada4586ea6783b22, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/53c42a997651408d9d529fec9a50dde3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/983d84682dde40b1a63e78b127512f54] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=35.3 K 2024-11-20T15:23:08,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742345_1521 (size=12207) 2024-11-20T15:23:08,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/c59231c6613e4405987dbb52a4dedcc1 is 50, key is test_row_0/A:col10/1732116188169/Put/seqid=0 2024-11-20T15:23:08,822 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c314f7a1f9a4cb1ada4586ea6783b22, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732116187173 2024-11-20T15:23:08,823 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:08,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:08,824 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 53c42a997651408d9d529fec9a50dde3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732116187349 2024-11-20T15:23:08,825 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 983d84682dde40b1a63e78b127512f54, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732116187491 2024-11-20T15:23:08,830 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/18543d6f4d6a420cb076846a95a307fb as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/18543d6f4d6a420cb076846a95a307fb 2024-11-20T15:23:08,837 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/A of 558f4ab8fec4b16865ece4459134acf9 into 18543d6f4d6a420cb076846a95a307fb(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:08,837 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:08,837 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/A, priority=13, startTime=1732116188703; duration=0sec 2024-11-20T15:23:08,838 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:08,838 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:A 2024-11-20T15:23:08,841 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#C#compaction#444 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:08,842 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/06b5526fc8914beab7c3fd3b1a0ee7d6 is 50, key is test_row_0/C:col10/1732116187491/Put/seqid=0 2024-11-20T15:23:08,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742347_1523 (size=12207) 2024-11-20T15:23:08,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742346_1522 (size=12001) 2024-11-20T15:23:08,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116248858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116248859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116248860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116248969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116248970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:08,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:08,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116248970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:09,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116249177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:09,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116249182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:09,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116249182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,250 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/06b5526fc8914beab7c3fd3b1a0ee7d6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/06b5526fc8914beab7c3fd3b1a0ee7d6 2024-11-20T15:23:09,256 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/C of 558f4ab8fec4b16865ece4459134acf9 into 06b5526fc8914beab7c3fd3b1a0ee7d6(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
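[editor's note] The records above show the ExploringCompactionPolicy picking three store files per family and the CompactSplit runner rewriting them into a single ~11.9 K file per store. For reference, a minimal client-side sketch (an assumption-laden illustration using the standard HBase 2.x Admin API, not part of this test) that requests a compaction of the same table and polls its state; the one-second poll interval is an arbitrary choice:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionStatusExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.compact(table);                     // ask the region server(s) to run a compaction
      CompactionState state = admin.getCompactionState(table);
      while (state != CompactionState.NONE) {   // poll until the compaction queues drain
        Thread.sleep(1000);
        state = admin.getCompactionState(table);
      }
      System.out.println("Compaction finished for " + table);
    }
  }
}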
2024-11-20T15:23:09,256 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:09,256 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/C, priority=13, startTime=1732116188703; duration=0sec 2024-11-20T15:23:09,256 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:09,256 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:C 2024-11-20T15:23:09,263 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/c59231c6613e4405987dbb52a4dedcc1 2024-11-20T15:23:09,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/211e8457d24149aabec18030f62cd4e0 is 50, key is test_row_0/B:col10/1732116188169/Put/seqid=0 2024-11-20T15:23:09,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742348_1524 (size=12001) 2024-11-20T15:23:09,300 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/211e8457d24149aabec18030f62cd4e0 2024-11-20T15:23:09,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/624f9cfa6283491fb453f8a268c6a21b is 50, key is test_row_0/C:col10/1732116188169/Put/seqid=0 2024-11-20T15:23:09,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742349_1525 (size=12001) 2024-11-20T15:23:09,327 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/624f9cfa6283491fb453f8a268c6a21b 2024-11-20T15:23:09,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/c59231c6613e4405987dbb52a4dedcc1 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/c59231c6613e4405987dbb52a4dedcc1 2024-11-20T15:23:09,343 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/c59231c6613e4405987dbb52a4dedcc1, entries=150, sequenceid=113, filesize=11.7 K 2024-11-20T15:23:09,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/211e8457d24149aabec18030f62cd4e0 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/211e8457d24149aabec18030f62cd4e0 2024-11-20T15:23:09,351 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/211e8457d24149aabec18030f62cd4e0, entries=150, sequenceid=113, filesize=11.7 K 2024-11-20T15:23:09,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/624f9cfa6283491fb453f8a268c6a21b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/624f9cfa6283491fb453f8a268c6a21b 2024-11-20T15:23:09,362 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/624f9cfa6283491fb453f8a268c6a21b, entries=150, sequenceid=113, filesize=11.7 K 2024-11-20T15:23:09,362 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 558f4ab8fec4b16865ece4459134acf9 in 567ms, sequenceid=113, compaction requested=false 2024-11-20T15:23:09,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:09,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
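[editor's note] The RegionTooBusyException warnings interleaved with the flush above all report "Over memstore limit=512.0 K". That blocking limit is the per-region flush threshold multiplied by the memstore block multiplier. A minimal configuration sketch, assuming the test harness uses a 128 K flush size with the default multiplier of 4 (the 128 K figure is an inference from the 512 K limit in the log, not something the log states):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold in bytes; a small value here makes flushes
    // (and the blocking limit) trigger quickly under write load.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Writes are rejected with RegionTooBusyException once the memstore grows
    // past flush.size * block.multiplier (128 K * 4 = 512 K, matching the log).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore limit: " + blockingLimit + " bytes");
  }
}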
2024-11-20T15:23:09,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-20T15:23:09,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-20T15:23:09,367 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-20T15:23:09,367 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9480 sec 2024-11-20T15:23:09,370 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.9550 sec 2024-11-20T15:23:09,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:09,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T15:23:09,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:09,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:09,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:09,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:09,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:09,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:09,488 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/f4470ee73dfa48e3bcd9820a177a8610 is 50, key is test_row_0/A:col10/1732116188823/Put/seqid=0 2024-11-20T15:23:09,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742350_1526 (size=14391) 2024-11-20T15:23:09,516 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/f4470ee73dfa48e3bcd9820a177a8610 2024-11-20T15:23:09,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T15:23:09,520 INFO [Thread-2222 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-20T15:23:09,522 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 
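[editor's note] The "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed" and "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" lines correspond to a table flush issued through the client Admin API, which the master executes as a FlushTableProcedure (pid=138, then pid=140 below). A minimal sketch of issuing the same request, assuming a reachable cluster with default connection settings:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the call returns
      // once the flush procedure has completed (as seen for procId 138 above).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}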
2024-11-20T15:23:09,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-20T15:23:09,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T15:23:09,524 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:09,528 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:09,528 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:09,545 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/2b1807d2888d4d3db738b993a7815d3f is 50, key is test_row_0/B:col10/1732116188823/Put/seqid=0 2024-11-20T15:23:09,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:09,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116249551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:09,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116249555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:09,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116249553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742351_1527 (size=12051) 2024-11-20T15:23:09,584 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/2b1807d2888d4d3db738b993a7815d3f 2024-11-20T15:23:09,588 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T15:23:09,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/6c4a0ad556e449eb811f32f1931167ca is 50, key is test_row_0/C:col10/1732116188823/Put/seqid=0 2024-11-20T15:23:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T15:23:09,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:09,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116249636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116249639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742352_1528 (size=12051) 2024-11-20T15:23:09,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116249665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116249665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,681 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:09,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116249675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T15:23:09,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
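[editor's note] Each RegionTooBusyException above is returned to a Mutate RPC whose deadline is roughly a minute out; the HBase client retries such calls with backoff rather than surfacing them immediately, which is why the same connections keep reappearing with increasing callIds. A hedged sketch of a writer configured with a larger retry budget; the retry count, pause, and timeout values are illustrative assumptions, while the row, family, and qualifier mirror the test_row_0/A:col10 keys visible in the log:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Give the client enough retries and backoff to ride out temporary
    // RegionTooBusyException responses while the region flushes (values are
    // illustrative, not the test's settings).
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100);               // ms between retries
    conf.setLong("hbase.client.operation.timeout", 120_000);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put);  // retried internally; only fails after the retry budget is spent
      } catch (IOException e) {
        // The region stayed over its memstore limit for the whole retry budget;
        // back off at the application level and try again later.
        System.err.println("Write gave up after retries: " + e.getMessage());
      }
    }
  }
}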
2024-11-20T15:23:09,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:09,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:09,689 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:09,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:09,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:09,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T15:23:09,842 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,842 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T15:23:09,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:09,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:09,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:09,843 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:09,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
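[editor's note] The Mutate RPCs being rejected above are small (about 4.7 K each), one RPC per put. This test intentionally hammers the region that way, but a general-purpose writer would usually batch through BufferedMutator instead so fewer, larger RPCs reach the region server. A hedged sketch of that alternative, not what this test does; the loop bounds and values are placeholders:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchedWriterExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BufferedMutator mutator = conn.getBufferedMutator(table)) {
      List<Put> batch = new ArrayList<>();
      for (int i = 0; i < 100; i++) {
        Put put = new Put(Bytes.toBytes("test_row_" + i));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v" + i));
        batch.add(put);
      }
      mutator.mutate(batch);  // buffered client-side, shipped as fewer, larger RPCs
      mutator.flush();        // push anything still buffered before closing
    }
  }
}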
2024-11-20T15:23:09,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:09,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:09,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116249871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,876 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:09,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116249872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:09,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116249886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,996 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:09,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T15:23:09,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:09,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:09,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:09,997 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:09,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:09,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:10,051 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/6c4a0ad556e449eb811f32f1931167ca 2024-11-20T15:23:10,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/f4470ee73dfa48e3bcd9820a177a8610 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/f4470ee73dfa48e3bcd9820a177a8610 2024-11-20T15:23:10,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/f4470ee73dfa48e3bcd9820a177a8610, entries=200, sequenceid=130, filesize=14.1 K 2024-11-20T15:23:10,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/2b1807d2888d4d3db738b993a7815d3f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/2b1807d2888d4d3db738b993a7815d3f 2024-11-20T15:23:10,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/2b1807d2888d4d3db738b993a7815d3f, entries=150, sequenceid=130, filesize=11.8 K 2024-11-20T15:23:10,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/6c4a0ad556e449eb811f32f1931167ca as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6c4a0ad556e449eb811f32f1931167ca 2024-11-20T15:23:10,072 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6c4a0ad556e449eb811f32f1931167ca, entries=150, sequenceid=130, filesize=11.8 K 2024-11-20T15:23:10,073 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 558f4ab8fec4b16865ece4459134acf9 in 589ms, sequenceid=130, compaction requested=true 2024-11-20T15:23:10,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:10,073 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:10,074 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38599 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:10,074 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/A is initiating minor compaction (all files) 2024-11-20T15:23:10,074 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/A in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:10,074 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/18543d6f4d6a420cb076846a95a307fb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/c59231c6613e4405987dbb52a4dedcc1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/f4470ee73dfa48e3bcd9820a177a8610] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=37.7 K 2024-11-20T15:23:10,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:10,075 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18543d6f4d6a420cb076846a95a307fb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732116187491 2024-11-20T15:23:10,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:10,075 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:10,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:10,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:10,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:C, priority=-2147483648, 
current under compaction store size is 3 2024-11-20T15:23:10,075 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting c59231c6613e4405987dbb52a4dedcc1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732116188169 2024-11-20T15:23:10,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:10,075 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4470ee73dfa48e3bcd9820a177a8610, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732116188823 2024-11-20T15:23:10,076 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:10,076 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/B is initiating minor compaction (all files) 2024-11-20T15:23:10,076 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/B in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:10,076 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/f510935396264b07bb93d117a15fd946, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/211e8457d24149aabec18030f62cd4e0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/2b1807d2888d4d3db738b993a7815d3f] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=35.4 K 2024-11-20T15:23:10,077 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting f510935396264b07bb93d117a15fd946, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732116187491 2024-11-20T15:23:10,077 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 211e8457d24149aabec18030f62cd4e0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732116188169 2024-11-20T15:23:10,077 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b1807d2888d4d3db738b993a7815d3f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732116188823 2024-11-20T15:23:10,094 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#A#compaction#450 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:10,094 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/b61a5bb5e001414fb8406224ea6caaab is 50, key is test_row_0/A:col10/1732116188823/Put/seqid=0 2024-11-20T15:23:10,098 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#B#compaction#451 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:10,099 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/73b2966741544a85b7c19aa4e30ca42c is 50, key is test_row_0/B:col10/1732116188823/Put/seqid=0 2024-11-20T15:23:10,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T15:23:10,149 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:10,150 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T15:23:10,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:10,150 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T15:23:10,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:10,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:10,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:10,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:10,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:10,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:10,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742354_1530 (size=12359) 2024-11-20T15:23:10,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742353_1529 (size=12359) 2024-11-20T15:23:10,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/964438b534944ef6b9880c2af3df1102 is 50, key is test_row_0/A:col10/1732116189553/Put/seqid=0 2024-11-20T15:23:10,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
as already flushing 2024-11-20T15:23:10,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:10,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742355_1531 (size=12151) 2024-11-20T15:23:10,193 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/964438b534944ef6b9880c2af3df1102 2024-11-20T15:23:10,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/b0e554361a354ce393f8d9e863410952 is 50, key is test_row_0/B:col10/1732116189553/Put/seqid=0 2024-11-20T15:23:10,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:10,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116250225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:10,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:10,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116250227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:10,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:10,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116250227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:10,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742356_1532 (size=12151) 2024-11-20T15:23:10,259 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/b0e554361a354ce393f8d9e863410952 2024-11-20T15:23:10,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/793b6568c19448b89adb2f43f92fdbea is 50, key is test_row_0/C:col10/1732116189553/Put/seqid=0 2024-11-20T15:23:10,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742357_1533 (size=12151) 2024-11-20T15:23:10,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:10,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116250333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:10,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:10,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116250342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:10,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:10,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116250342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:10,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:10,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116250546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:10,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:10,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116250551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:10,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:10,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116250552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:10,567 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/b61a5bb5e001414fb8406224ea6caaab as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/b61a5bb5e001414fb8406224ea6caaab 2024-11-20T15:23:10,572 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/A of 558f4ab8fec4b16865ece4459134acf9 into b61a5bb5e001414fb8406224ea6caaab(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:10,572 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:10,572 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/A, priority=13, startTime=1732116190073; duration=0sec 2024-11-20T15:23:10,572 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:10,572 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:A 2024-11-20T15:23:10,572 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:10,574 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:10,574 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/C is initiating minor compaction (all files) 2024-11-20T15:23:10,574 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/C in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:10,574 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/06b5526fc8914beab7c3fd3b1a0ee7d6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/624f9cfa6283491fb453f8a268c6a21b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6c4a0ad556e449eb811f32f1931167ca] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=35.4 K 2024-11-20T15:23:10,574 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06b5526fc8914beab7c3fd3b1a0ee7d6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732116187491 2024-11-20T15:23:10,575 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 624f9cfa6283491fb453f8a268c6a21b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732116188169 2024-11-20T15:23:10,575 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c4a0ad556e449eb811f32f1931167ca, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732116188823 2024-11-20T15:23:10,579 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/73b2966741544a85b7c19aa4e30ca42c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/73b2966741544a85b7c19aa4e30ca42c 2024-11-20T15:23:10,584 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/B of 558f4ab8fec4b16865ece4459134acf9 into 73b2966741544a85b7c19aa4e30ca42c(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:10,584 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:10,584 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/B, priority=13, startTime=1732116190075; duration=0sec 2024-11-20T15:23:10,584 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:10,584 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:B 2024-11-20T15:23:10,594 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#C#compaction#455 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:10,594 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/b4c4a073a28b47529c16edaede0875b3 is 50, key is test_row_0/C:col10/1732116188823/Put/seqid=0 2024-11-20T15:23:10,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T15:23:10,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742358_1534 (size=12359) 2024-11-20T15:23:10,655 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/b4c4a073a28b47529c16edaede0875b3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/b4c4a073a28b47529c16edaede0875b3 2024-11-20T15:23:10,661 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/C of 558f4ab8fec4b16865ece4459134acf9 into b4c4a073a28b47529c16edaede0875b3(size=12.1 K), total size for store is 12.1 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:10,661 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:10,661 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/C, priority=13, startTime=1732116190075; duration=0sec 2024-11-20T15:23:10,662 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:10,662 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:C 2024-11-20T15:23:10,720 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/793b6568c19448b89adb2f43f92fdbea 2024-11-20T15:23:10,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/964438b534944ef6b9880c2af3df1102 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/964438b534944ef6b9880c2af3df1102 2024-11-20T15:23:10,732 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/964438b534944ef6b9880c2af3df1102, entries=150, sequenceid=152, filesize=11.9 K 2024-11-20T15:23:10,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/b0e554361a354ce393f8d9e863410952 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b0e554361a354ce393f8d9e863410952 2024-11-20T15:23:10,741 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b0e554361a354ce393f8d9e863410952, entries=150, sequenceid=152, filesize=11.9 K 2024-11-20T15:23:10,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/793b6568c19448b89adb2f43f92fdbea as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/793b6568c19448b89adb2f43f92fdbea 2024-11-20T15:23:10,750 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/793b6568c19448b89adb2f43f92fdbea, entries=150, sequenceid=152, filesize=11.9 K 2024-11-20T15:23:10,751 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 558f4ab8fec4b16865ece4459134acf9 in 601ms, sequenceid=152, compaction requested=false 2024-11-20T15:23:10,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:10,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:10,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-20T15:23:10,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-20T15:23:10,753 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-20T15:23:10,753 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2240 sec 2024-11-20T15:23:10,755 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.2320 sec 2024-11-20T15:23:10,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:10,869 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T15:23:10,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:10,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:10,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:10,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:10,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:10,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:10,885 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/14caa1ce980949248680de6a637a9b04 is 50, key is test_row_0/A:col10/1732116190211/Put/seqid=0 2024-11-20T15:23:10,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742359_1535 (size=14541) 2024-11-20T15:23:10,910 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/14caa1ce980949248680de6a637a9b04 2024-11-20T15:23:10,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/ab3cca8451e1480ab404b03ad846138a is 50, key is test_row_0/B:col10/1732116190211/Put/seqid=0 2024-11-20T15:23:10,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742360_1536 (size=12151) 2024-11-20T15:23:10,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:10,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116250956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:10,964 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/ab3cca8451e1480ab404b03ad846138a 2024-11-20T15:23:10,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:10,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116250957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:10,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:10,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116250959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:10,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/866910f9a0c5490f9b5cd64676478198 is 50, key is test_row_0/C:col10/1732116190211/Put/seqid=0 2024-11-20T15:23:11,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742361_1537 (size=12151) 2024-11-20T15:23:11,023 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/866910f9a0c5490f9b5cd64676478198 2024-11-20T15:23:11,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/14caa1ce980949248680de6a637a9b04 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/14caa1ce980949248680de6a637a9b04 2024-11-20T15:23:11,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/14caa1ce980949248680de6a637a9b04, entries=200, sequenceid=170, filesize=14.2 K 2024-11-20T15:23:11,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/ab3cca8451e1480ab404b03ad846138a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/ab3cca8451e1480ab404b03ad846138a 2024-11-20T15:23:11,044 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/ab3cca8451e1480ab404b03ad846138a, entries=150, sequenceid=170, filesize=11.9 K 2024-11-20T15:23:11,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/866910f9a0c5490f9b5cd64676478198 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/866910f9a0c5490f9b5cd64676478198 2024-11-20T15:23:11,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/866910f9a0c5490f9b5cd64676478198, entries=150, sequenceid=170, filesize=11.9 K 2024-11-20T15:23:11,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 558f4ab8fec4b16865ece4459134acf9 in 185ms, sequenceid=170, compaction requested=true 2024-11-20T15:23:11,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:11,056 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:11,060 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39051 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:11,060 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/A is initiating minor compaction (all files) 2024-11-20T15:23:11,060 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/A in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:11,060 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/b61a5bb5e001414fb8406224ea6caaab, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/964438b534944ef6b9880c2af3df1102, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/14caa1ce980949248680de6a637a9b04] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=38.1 K 2024-11-20T15:23:11,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:11,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:11,061 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:11,061 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting b61a5bb5e001414fb8406224ea6caaab, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732116188823 2024-11-20T15:23:11,062 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 964438b534944ef6b9880c2af3df1102, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732116189536 2024-11-20T15:23:11,062 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:11,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:11,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:11,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:11,063 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14caa1ce980949248680de6a637a9b04, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732116190211 2024-11-20T15:23:11,064 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:11,064 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/B is 
initiating minor compaction (all files) 2024-11-20T15:23:11,064 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/B in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:11,065 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/73b2966741544a85b7c19aa4e30ca42c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b0e554361a354ce393f8d9e863410952, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/ab3cca8451e1480ab404b03ad846138a] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=35.8 K 2024-11-20T15:23:11,068 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 73b2966741544a85b7c19aa4e30ca42c, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732116188823 2024-11-20T15:23:11,069 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b0e554361a354ce393f8d9e863410952, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732116189536 2024-11-20T15:23:11,070 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting ab3cca8451e1480ab404b03ad846138a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732116190211 2024-11-20T15:23:11,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:11,073 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T15:23:11,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:11,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:11,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:11,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:11,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:11,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:11,082 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#A#compaction#459 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:11,083 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/4613c92e5c564cfb9a9295524a463072 is 50, key is test_row_0/A:col10/1732116190211/Put/seqid=0 2024-11-20T15:23:11,096 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/6fbf314cb42e4cbc892db2462d643dfd is 50, key is test_row_0/A:col10/1732116191071/Put/seqid=0 2024-11-20T15:23:11,098 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#B#compaction#461 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:11,099 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/952d5736c8234a4792fa18c7acc71df3 is 50, key is test_row_0/B:col10/1732116190211/Put/seqid=0 2024-11-20T15:23:11,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:11,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116251110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:11,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:11,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116251112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:11,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:11,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116251119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:11,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742362_1538 (size=12561) 2024-11-20T15:23:11,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742363_1539 (size=14541) 2024-11-20T15:23:11,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742364_1540 (size=12561) 2024-11-20T15:23:11,222 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/952d5736c8234a4792fa18c7acc71df3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/952d5736c8234a4792fa18c7acc71df3 2024-11-20T15:23:11,228 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/B of 558f4ab8fec4b16865ece4459134acf9 into 952d5736c8234a4792fa18c7acc71df3(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:11,228 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:11,228 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/B, priority=13, startTime=1732116191061; duration=0sec 2024-11-20T15:23:11,228 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:11,228 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:B 2024-11-20T15:23:11,228 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:11,229 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:11,229 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/C is initiating minor compaction (all files) 2024-11-20T15:23:11,229 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/C in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:11,230 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/b4c4a073a28b47529c16edaede0875b3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/793b6568c19448b89adb2f43f92fdbea, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/866910f9a0c5490f9b5cd64676478198] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=35.8 K 2024-11-20T15:23:11,230 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b4c4a073a28b47529c16edaede0875b3, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732116188823 2024-11-20T15:23:11,230 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 793b6568c19448b89adb2f43f92fdbea, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732116189536 2024-11-20T15:23:11,231 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 866910f9a0c5490f9b5cd64676478198, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732116190211 2024-11-20T15:23:11,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:11,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116251220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:11,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:11,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116251226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:11,241 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#C#compaction#462 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:11,242 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/6d0621d8fdb6422db25f07d5972c99e6 is 50, key is test_row_0/C:col10/1732116190211/Put/seqid=0 2024-11-20T15:23:11,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:11,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116251238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:11,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742365_1541 (size=12561) 2024-11-20T15:23:11,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:11,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116251440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:11,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:11,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116251440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:11,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:11,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116251448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:11,568 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/4613c92e5c564cfb9a9295524a463072 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/4613c92e5c564cfb9a9295524a463072 2024-11-20T15:23:11,575 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/A of 558f4ab8fec4b16865ece4459134acf9 into 4613c92e5c564cfb9a9295524a463072(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:11,575 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:11,575 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/A, priority=13, startTime=1732116191056; duration=0sec 2024-11-20T15:23:11,575 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:11,575 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:A 2024-11-20T15:23:11,578 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/6fbf314cb42e4cbc892db2462d643dfd 2024-11-20T15:23:11,594 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/4d0dfc64b146484695c8957dba0dd546 is 50, key is test_row_0/B:col10/1732116191071/Put/seqid=0 2024-11-20T15:23:11,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T15:23:11,629 INFO [Thread-2222 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-20T15:23:11,631 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:11,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-20T15:23:11,632 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:11,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T15:23:11,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742366_1542 (size=12151) 2024-11-20T15:23:11,633 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:11,633 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:11,663 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:11,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116251662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:11,664 DEBUG [Thread-2216 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4169 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., hostname=0b62285ead89,33387,1732116069954, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:23:11,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:11,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116251665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:11,671 DEBUG [Thread-2220 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4184 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., hostname=0b62285ead89,33387,1732116069954, seqNum=2, see 
https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) 
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:23:11,694 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/6d0621d8fdb6422db25f07d5972c99e6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6d0621d8fdb6422db25f07d5972c99e6 2024-11-20T15:23:11,699 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/C of 558f4ab8fec4b16865ece4459134acf9 into 6d0621d8fdb6422db25f07d5972c99e6(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:11,699 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:11,699 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/C, priority=13, startTime=1732116191063; duration=0sec 2024-11-20T15:23:11,699 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:11,699 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:C 2024-11-20T15:23:11,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T15:23:11,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:11,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116251747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:11,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:11,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116251747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:11,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:11,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116251754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:11,784 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:11,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T15:23:11,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:11,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:11,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:11,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:11,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:11,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:11,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T15:23:11,937 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:11,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T15:23:11,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:11,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:11,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:11,939 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:11,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:11,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:12,034 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/4d0dfc64b146484695c8957dba0dd546 2024-11-20T15:23:12,045 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/0f412ed1f46349c7a0fd610c5dc69d15 is 50, key is test_row_0/C:col10/1732116191071/Put/seqid=0 2024-11-20T15:23:12,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742367_1543 (size=12151) 2024-11-20T15:23:12,094 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:12,094 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T15:23:12,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:12,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:12,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:12,095 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
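
Editor's note: the retry counters above ("tries=6, retries=16") come from the HBase client's RpcRetryingCallerImpl, which keeps re-sending the Mutate call while the region server rejects it with RegionTooBusyException. The following is a minimal, hypothetical client sketch that issues the same kind of single-row put against the TestAcidGuarantees table and handles the backpressure explicitly; the retry and pause values are illustrative assumptions, not the settings used by this test run.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Client-side retry policy (illustrative values; the log above shows retries=16
        // with an increasing pause between attempts).
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100); // base pause in milliseconds

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // Column family and qualifier taken from the log ("C:col10"); the value is arbitrary.
            put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            try {
                // The client retries RegionTooBusyException internally; it only reaches the
                // catch block once the retry budget is exhausted (it may also arrive wrapped
                // in a RetriesExhaustedException, depending on the client code path).
                table.put(put);
            } catch (RegionTooBusyException e) {
                System.err.println("Region still over its memstore limit, giving up: " + e.getMessage());
            }
        }
    }
}
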
2024-11-20T15:23:12,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:12,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:12,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T15:23:12,247 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:12,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T15:23:12,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:12,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:12,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:12,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:12,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:12,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:12,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:12,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116252255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:12,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:12,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116252257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:12,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:12,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116252266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:12,401 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:12,401 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T15:23:12,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
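
Editor's note: the 512.0 K figure in the RegionTooBusyException messages is the region's blocking memstore size, which HBase derives as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier (the flush size is commonly overridden per table descriptor, which is how tests shrink it). Writes are rejected with this exception until flushes bring the memstore back under that product. The exact values configured by this test are not visible in the log, so the sketch below only shows one hypothetical combination that yields the same 512 KB threshold.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values: a 128 KB flush size with a block multiplier of 4
        // gives the 512 KB blocking limit reported in the log (flushSize * multiplier).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;
        // Prints 524288 bytes, i.e. the "Over memstore limit=512.0 K" threshold seen above.
        System.out.println("Blocking memstore size: " + blockingLimit + " bytes");
    }
}
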
2024-11-20T15:23:12,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:12,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:12,402 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:12,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:12,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
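The failed pid=143 above is the per-region subprocedure of a table flush: FlushRegionCallable finds the region already flushing, reports "Unable to complete flush", and the master redispatches the procedure moments later (15:23:12,554 below) until it succeeds. On the client side this exchange is driven by an admin flush request, sketched minimally here; the table name is taken from the log and a standard client configuration is assumed.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Submits a flush of the table on the master and waits for it to finish,
          // which corresponds to the "Operation: FLUSH ... completed" client log
          // entry further below.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }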
2024-11-20T15:23:12,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/0f412ed1f46349c7a0fd610c5dc69d15 2024-11-20T15:23:12,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/6fbf314cb42e4cbc892db2462d643dfd as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/6fbf314cb42e4cbc892db2462d643dfd 2024-11-20T15:23:12,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/6fbf314cb42e4cbc892db2462d643dfd, entries=200, sequenceid=192, filesize=14.2 K 2024-11-20T15:23:12,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/4d0dfc64b146484695c8957dba0dd546 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/4d0dfc64b146484695c8957dba0dd546 2024-11-20T15:23:12,495 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/4d0dfc64b146484695c8957dba0dd546, entries=150, sequenceid=192, filesize=11.9 K 2024-11-20T15:23:12,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/0f412ed1f46349c7a0fd610c5dc69d15 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0f412ed1f46349c7a0fd610c5dc69d15 2024-11-20T15:23:12,502 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0f412ed1f46349c7a0fd610c5dc69d15, entries=150, sequenceid=192, filesize=11.9 K 2024-11-20T15:23:12,503 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 558f4ab8fec4b16865ece4459134acf9 in 1430ms, sequenceid=192, compaction requested=false 2024-11-20T15:23:12,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:12,554 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:12,555 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=143 2024-11-20T15:23:12,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:12,555 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T15:23:12,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:12,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:12,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:12,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:12,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:12,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:12,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/9588c4dba8d84c748103c11f853714e4 is 50, key is test_row_0/A:col10/1732116191108/Put/seqid=0 2024-11-20T15:23:12,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742368_1544 (size=12151) 2024-11-20T15:23:12,616 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/9588c4dba8d84c748103c11f853714e4 2024-11-20T15:23:12,623 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a21aeb7baff147ef9ea7d045f2f98a14, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/cbbb15d8c3ca42278a5d345df508605b, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d957cb7b67b9421c854226e367ba72bd, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/132220622be4447cb08d06f2460ae7d5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9ae06e262b04464a9710ac3300060002, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/18543d6f4d6a420cb076846a95a307fb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/5c673977bd054255a12ef04ae0c37fa6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/c59231c6613e4405987dbb52a4dedcc1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/f4470ee73dfa48e3bcd9820a177a8610, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/b61a5bb5e001414fb8406224ea6caaab, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/964438b534944ef6b9880c2af3df1102, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/14caa1ce980949248680de6a637a9b04] to archive 2024-11-20T15:23:12,624 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
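The "Moving the files ... to archive" records here show the compacted-files discharger relocating store files that are no longer referenced after compaction: each destination mirrors the source path with archive/ inserted before data/ under the same HDFS root. The helper below is only an illustration of that path mapping for the default-namespace layout seen in this log, not HBase's HFileArchiver API.

    // Illustrative helper only (not HBase's HFileArchiver API): derives the archive
    // location of a store file by inserting "archive" before "/data/default/",
    // matching the default-namespace paths in the surrounding records.
    public final class ArchivePath {
      static String toArchivePath(String storeFilePath) {
        int idx = storeFilePath.indexOf("/data/default/");
        if (idx < 0) {
          throw new IllegalArgumentException("Not a store file path: " + storeFilePath);
        }
        return storeFilePath.substring(0, idx) + "/archive" + storeFilePath.substring(idx);
      }

      public static void main(String[] args) {
        // Example taken from the first archive record below; prints the matching
        // .../archive/data/default/... destination.
        System.out.println(toArchivePath(
            "hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/"
            + "data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a21aeb7baff147ef9ea7d045f2f98a14"));
      }
    }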
2024-11-20T15:23:12,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/8e1f9d158c0b45c996f5b0d6e24876dc is 50, key is test_row_0/B:col10/1732116191108/Put/seqid=0 2024-11-20T15:23:12,636 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a21aeb7baff147ef9ea7d045f2f98a14 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a21aeb7baff147ef9ea7d045f2f98a14 2024-11-20T15:23:12,639 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/cbbb15d8c3ca42278a5d345df508605b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/cbbb15d8c3ca42278a5d345df508605b 2024-11-20T15:23:12,642 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d957cb7b67b9421c854226e367ba72bd to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d957cb7b67b9421c854226e367ba72bd 2024-11-20T15:23:12,644 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/132220622be4447cb08d06f2460ae7d5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/132220622be4447cb08d06f2460ae7d5 2024-11-20T15:23:12,645 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9ae06e262b04464a9710ac3300060002 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9ae06e262b04464a9710ac3300060002 2024-11-20T15:23:12,660 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/18543d6f4d6a420cb076846a95a307fb to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/18543d6f4d6a420cb076846a95a307fb 2024-11-20T15:23:12,662 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/5c673977bd054255a12ef04ae0c37fa6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/5c673977bd054255a12ef04ae0c37fa6 2024-11-20T15:23:12,664 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/c59231c6613e4405987dbb52a4dedcc1 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/c59231c6613e4405987dbb52a4dedcc1 2024-11-20T15:23:12,665 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/f4470ee73dfa48e3bcd9820a177a8610 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/f4470ee73dfa48e3bcd9820a177a8610 2024-11-20T15:23:12,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742369_1545 (size=12151) 2024-11-20T15:23:12,667 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/8e1f9d158c0b45c996f5b0d6e24876dc 2024-11-20T15:23:12,667 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/b61a5bb5e001414fb8406224ea6caaab to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/b61a5bb5e001414fb8406224ea6caaab 2024-11-20T15:23:12,668 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/964438b534944ef6b9880c2af3df1102 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/964438b534944ef6b9880c2af3df1102 2024-11-20T15:23:12,672 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/14caa1ce980949248680de6a637a9b04 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/14caa1ce980949248680de6a637a9b04 2024-11-20T15:23:12,674 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/75a44a59c572433ca14e256d130cd4ff, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/0b00a44d371145779f736298771f49d7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/343df17aeb7642ef9cf4b0d0a6f9cb0a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/ad1aff44c70445dbaafe987f813d3fa2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/27cb99f5aa15463badec3cdc882cfa21, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/f510935396264b07bb93d117a15fd946, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/1cd9d12da1cd496681f9fb19d48e947a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/211e8457d24149aabec18030f62cd4e0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/73b2966741544a85b7c19aa4e30ca42c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/2b1807d2888d4d3db738b993a7815d3f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b0e554361a354ce393f8d9e863410952, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/ab3cca8451e1480ab404b03ad846138a] to archive 2024-11-20T15:23:12,676 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T15:23:12,678 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/75a44a59c572433ca14e256d130cd4ff to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/75a44a59c572433ca14e256d130cd4ff 2024-11-20T15:23:12,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/0721bbed1f764d5782980e58a17fc77e is 50, key is test_row_0/C:col10/1732116191108/Put/seqid=0 2024-11-20T15:23:12,681 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/0b00a44d371145779f736298771f49d7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/0b00a44d371145779f736298771f49d7 2024-11-20T15:23:12,683 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/343df17aeb7642ef9cf4b0d0a6f9cb0a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/343df17aeb7642ef9cf4b0d0a6f9cb0a 2024-11-20T15:23:12,684 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/ad1aff44c70445dbaafe987f813d3fa2 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/ad1aff44c70445dbaafe987f813d3fa2 2024-11-20T15:23:12,685 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/27cb99f5aa15463badec3cdc882cfa21 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/27cb99f5aa15463badec3cdc882cfa21 2024-11-20T15:23:12,687 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/f510935396264b07bb93d117a15fd946 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/f510935396264b07bb93d117a15fd946 2024-11-20T15:23:12,688 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/1cd9d12da1cd496681f9fb19d48e947a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/1cd9d12da1cd496681f9fb19d48e947a 2024-11-20T15:23:12,690 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/211e8457d24149aabec18030f62cd4e0 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/211e8457d24149aabec18030f62cd4e0 2024-11-20T15:23:12,691 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/73b2966741544a85b7c19aa4e30ca42c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/73b2966741544a85b7c19aa4e30ca42c 2024-11-20T15:23:12,693 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/2b1807d2888d4d3db738b993a7815d3f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/2b1807d2888d4d3db738b993a7815d3f 2024-11-20T15:23:12,694 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b0e554361a354ce393f8d9e863410952 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b0e554361a354ce393f8d9e863410952 2024-11-20T15:23:12,695 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/ab3cca8451e1480ab404b03ad846138a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/ab3cca8451e1480ab404b03ad846138a 2024-11-20T15:23:12,697 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6d3292b4c1cc4bed904225a9ed32f47a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/d4927b004a4d49acb63135826420ce02, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6c314f7a1f9a4cb1ada4586ea6783b22, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/1351ed3e93464d93be366999c242faa4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/53c42a997651408d9d529fec9a50dde3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/06b5526fc8914beab7c3fd3b1a0ee7d6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/983d84682dde40b1a63e78b127512f54, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/624f9cfa6283491fb453f8a268c6a21b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/b4c4a073a28b47529c16edaede0875b3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6c4a0ad556e449eb811f32f1931167ca, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/793b6568c19448b89adb2f43f92fdbea, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/866910f9a0c5490f9b5cd64676478198] to archive 2024-11-20T15:23:12,713 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T15:23:12,717 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6d3292b4c1cc4bed904225a9ed32f47a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6d3292b4c1cc4bed904225a9ed32f47a 2024-11-20T15:23:12,719 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/d4927b004a4d49acb63135826420ce02 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/d4927b004a4d49acb63135826420ce02 2024-11-20T15:23:12,721 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6c314f7a1f9a4cb1ada4586ea6783b22 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6c314f7a1f9a4cb1ada4586ea6783b22 2024-11-20T15:23:12,723 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/1351ed3e93464d93be366999c242faa4 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/1351ed3e93464d93be366999c242faa4 2024-11-20T15:23:12,725 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/53c42a997651408d9d529fec9a50dde3 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/53c42a997651408d9d529fec9a50dde3 2024-11-20T15:23:12,726 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/06b5526fc8914beab7c3fd3b1a0ee7d6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/06b5526fc8914beab7c3fd3b1a0ee7d6 2024-11-20T15:23:12,728 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/983d84682dde40b1a63e78b127512f54 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/983d84682dde40b1a63e78b127512f54 2024-11-20T15:23:12,730 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/624f9cfa6283491fb453f8a268c6a21b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/624f9cfa6283491fb453f8a268c6a21b 2024-11-20T15:23:12,732 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/b4c4a073a28b47529c16edaede0875b3 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/b4c4a073a28b47529c16edaede0875b3 2024-11-20T15:23:12,735 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6c4a0ad556e449eb811f32f1931167ca to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6c4a0ad556e449eb811f32f1931167ca 2024-11-20T15:23:12,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T15:23:12,738 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/793b6568c19448b89adb2f43f92fdbea to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/793b6568c19448b89adb2f43f92fdbea 2024-11-20T15:23:12,739 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/0b62285ead89:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/866910f9a0c5490f9b5cd64676478198 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/866910f9a0c5490f9b5cd64676478198 2024-11-20T15:23:12,741 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742370_1546 (size=12151) 2024-11-20T15:23:12,743 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/0721bbed1f764d5782980e58a17fc77e 2024-11-20T15:23:12,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/9588c4dba8d84c748103c11f853714e4 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9588c4dba8d84c748103c11f853714e4 2024-11-20T15:23:12,762 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9588c4dba8d84c748103c11f853714e4, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T15:23:12,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/8e1f9d158c0b45c996f5b0d6e24876dc as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/8e1f9d158c0b45c996f5b0d6e24876dc 2024-11-20T15:23:12,769 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/8e1f9d158c0b45c996f5b0d6e24876dc, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T15:23:12,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/0721bbed1f764d5782980e58a17fc77e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0721bbed1f764d5782980e58a17fc77e 2024-11-20T15:23:12,777 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0721bbed1f764d5782980e58a17fc77e, entries=150, sequenceid=209, filesize=11.9 K 2024-11-20T15:23:12,778 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of 
dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=0 B/0 for 558f4ab8fec4b16865ece4459134acf9 in 223ms, sequenceid=209, compaction requested=true 2024-11-20T15:23:12,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:12,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:12,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-20T15:23:12,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-20T15:23:12,782 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-20T15:23:12,782 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1470 sec 2024-11-20T15:23:12,784 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.1520 sec 2024-11-20T15:23:13,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:13,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:23:13,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:13,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:13,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:13,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:13,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:13,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:13,288 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/fc7e7c3916da46da97db55a3f1bfea4c is 50, key is test_row_0/A:col10/1732116193280/Put/seqid=0 2024-11-20T15:23:13,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742371_1547 (size=16931) 2024-11-20T15:23:13,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:13,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116253339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:13,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:13,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116253346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:13,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:13,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116253347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:13,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:13,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116253448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:13,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:13,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116253452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:13,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:13,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116253455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:13,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:13,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116253659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:13,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:13,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116253660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:13,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:13,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116253665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:13,702 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/fc7e7c3916da46da97db55a3f1bfea4c 2024-11-20T15:23:13,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/e3c967bd6baa4a09b15efa02ccbd7b8d is 50, key is test_row_0/B:col10/1732116193280/Put/seqid=0 2024-11-20T15:23:13,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T15:23:13,738 INFO [Thread-2222 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-20T15:23:13,740 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:13,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-11-20T15:23:13,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T15:23:13,742 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:13,742 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:13,742 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:13,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742372_1548 (size=12151) 2024-11-20T15:23:13,754 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/e3c967bd6baa4a09b15efa02ccbd7b8d 2024-11-20T15:23:13,761 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/885839d709a7406598df265dc1b3a4ca is 50, key is test_row_0/C:col10/1732116193280/Put/seqid=0 2024-11-20T15:23:13,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742373_1549 (size=12151) 2024-11-20T15:23:13,787 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/885839d709a7406598df265dc1b3a4ca 2024-11-20T15:23:13,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/fc7e7c3916da46da97db55a3f1bfea4c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/fc7e7c3916da46da97db55a3f1bfea4c 2024-11-20T15:23:13,809 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/fc7e7c3916da46da97db55a3f1bfea4c, entries=250, sequenceid=220, filesize=16.5 K 2024-11-20T15:23:13,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/e3c967bd6baa4a09b15efa02ccbd7b8d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/e3c967bd6baa4a09b15efa02ccbd7b8d 2024-11-20T15:23:13,817 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/e3c967bd6baa4a09b15efa02ccbd7b8d, entries=150, sequenceid=220, filesize=11.9 K 2024-11-20T15:23:13,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/885839d709a7406598df265dc1b3a4ca as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/885839d709a7406598df265dc1b3a4ca 2024-11-20T15:23:13,826 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/885839d709a7406598df265dc1b3a4ca, entries=150, sequenceid=220, filesize=11.9 K 2024-11-20T15:23:13,826 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 558f4ab8fec4b16865ece4459134acf9 in 545ms, sequenceid=220, compaction requested=true 2024-11-20T15:23:13,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:13,827 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:23:13,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:13,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:13,827 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:23:13,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:13,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:13,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:13,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:13,830 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 56184 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:23:13,830 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/A is initiating minor compaction (all files) 2024-11-20T15:23:13,830 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/A in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
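[editor's note] The entries above show a memstore flush completing for stores A, B and C and a minor compaction being selected immediately afterwards; the earlier "Over memstore limit=512.0 K" warnings come from the same region hitting its blocking memstore size. The following is a minimal sketch, assuming a test-style setup, of the configuration properties that control those thresholds; the numeric values are illustrative and not necessarily the ones this run actually used.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
    public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a memstore once it reaches 128 KB (the default is 128 MB); illustrative value only.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new writes (RegionTooBusyException) once the memstore reaches
        // flush.size * multiplier, i.e. 512 KB with these values -- the limit quoted in the warnings.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // Request a minor compaction once a store holds 4 HFiles, matching the
        // "Selecting compaction from 4 store files" entries above.
        conf.setInt("hbase.hstore.compactionThreshold", 4);
        return conf;
    }
}
```

With thresholds this small, flushes, compactions and too-busy rejections alternate tightly, which is exactly the pattern visible in this section of the log.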
2024-11-20T15:23:13,830 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/4613c92e5c564cfb9a9295524a463072, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/6fbf314cb42e4cbc892db2462d643dfd, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9588c4dba8d84c748103c11f853714e4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/fc7e7c3916da46da97db55a3f1bfea4c] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=54.9 K 2024-11-20T15:23:13,832 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:23:13,832 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/B is initiating minor compaction (all files) 2024-11-20T15:23:13,832 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/B in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:13,832 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/952d5736c8234a4792fa18c7acc71df3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/4d0dfc64b146484695c8957dba0dd546, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/8e1f9d158c0b45c996f5b0d6e24876dc, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/e3c967bd6baa4a09b15efa02ccbd7b8d] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=47.9 K 2024-11-20T15:23:13,832 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4613c92e5c564cfb9a9295524a463072, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732116190211 2024-11-20T15:23:13,832 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 952d5736c8234a4792fa18c7acc71df3, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732116190211 2024-11-20T15:23:13,833 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6fbf314cb42e4cbc892db2462d643dfd, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=192, 
earliestPutTs=1732116190946 2024-11-20T15:23:13,833 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d0dfc64b146484695c8957dba0dd546, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1732116190946 2024-11-20T15:23:13,833 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9588c4dba8d84c748103c11f853714e4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732116191096 2024-11-20T15:23:13,833 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc7e7c3916da46da97db55a3f1bfea4c, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732116193270 2024-11-20T15:23:13,834 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e1f9d158c0b45c996f5b0d6e24876dc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732116191096 2024-11-20T15:23:13,835 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting e3c967bd6baa4a09b15efa02ccbd7b8d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732116193276 2024-11-20T15:23:13,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T15:23:13,859 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#B#compaction#472 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:13,859 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#A#compaction#471 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:13,859 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/38821d700772465c8c41afa2a56e0ebc is 50, key is test_row_0/A:col10/1732116193280/Put/seqid=0 2024-11-20T15:23:13,859 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/a7ac53df52dc45f38521fabb30ecfafc is 50, key is test_row_0/B:col10/1732116193280/Put/seqid=0 2024-11-20T15:23:13,894 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:13,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-20T15:23:13,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:13,896 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T15:23:13,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:13,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:13,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:13,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:13,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:13,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:13,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742375_1551 (size=12289) 2024-11-20T15:23:13,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/a396fbba6d3f41febd6233b5c38ab8ce is 50, key is 
test_row_0/A:col10/1732116193345/Put/seqid=0 2024-11-20T15:23:13,910 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/a7ac53df52dc45f38521fabb30ecfafc as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/a7ac53df52dc45f38521fabb30ecfafc 2024-11-20T15:23:13,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742374_1550 (size=12289) 2024-11-20T15:23:13,922 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/B of 558f4ab8fec4b16865ece4459134acf9 into a7ac53df52dc45f38521fabb30ecfafc(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:13,922 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:13,922 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/B, priority=12, startTime=1732116193827; duration=0sec 2024-11-20T15:23:13,922 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:13,922 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:B 2024-11-20T15:23:13,922 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:23:13,924 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/38821d700772465c8c41afa2a56e0ebc as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/38821d700772465c8c41afa2a56e0ebc 2024-11-20T15:23:13,924 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:23:13,925 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/C is initiating minor compaction (all files) 2024-11-20T15:23:13,925 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/C in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
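[editor's note] The repeated RegionTooBusyException warnings in this section are server-side backpressure against the test's concurrent writers: each rejected Mutate is retried by the client until the flush drains the memstore. Below is a minimal sketch of such a writer, assuming the table and column layout visible in the log (table TestAcidGuarantees, families A/B/C, qualifier col10, rows like test_row_0). The explicit retry loop is only an illustration; the standard HBase client already retries these failures internally.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // Write the same qualifier to all three families, mirroring the A/B/C stores in the log.
            for (String family : new String[] {"A", "B", "C"}) {
                put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            }
            // Simple bounded retry: a RegionTooBusyException reaches the caller as an IOException
            // when the region is over its blocking memstore limit.
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;
                } catch (IOException e) {
                    if (attempt >= 5) throw e;
                    Thread.sleep(100L * attempt); // back off and let the flush catch up
                }
            }
        }
    }
}
```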
2024-11-20T15:23:13,925 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6d0621d8fdb6422db25f07d5972c99e6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0f412ed1f46349c7a0fd610c5dc69d15, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0721bbed1f764d5782980e58a17fc77e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/885839d709a7406598df265dc1b3a4ca] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=47.9 K 2024-11-20T15:23:13,926 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d0621d8fdb6422db25f07d5972c99e6, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732116190211 2024-11-20T15:23:13,927 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f412ed1f46349c7a0fd610c5dc69d15, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1732116190946 2024-11-20T15:23:13,928 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 0721bbed1f764d5782980e58a17fc77e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732116191096 2024-11-20T15:23:13,928 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 885839d709a7406598df265dc1b3a4ca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732116193276 2024-11-20T15:23:13,932 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/A of 558f4ab8fec4b16865ece4459134acf9 into 38821d700772465c8c41afa2a56e0ebc(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
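[editor's note] The "Flushed memstore data ... to=.../.tmp/..." entries followed by "Committing .../.tmp/... as .../A/..." show the two-step idiom the flusher and compactor use: write the new HFile under the region's .tmp directory, then move it into the store directory only once it is complete. The sketch below illustrates that write-then-rename pattern with the plain Hadoop FileSystem API; it is a generic illustration, not the actual HRegionFileSystem code, and the paths are placeholders shaped like the region layout in the log.

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommit {
    /** Write data to a temporary file, then move it to its final name in one step. */
    public static void writeAndCommit(FileSystem fs, Path tmpFile, Path finalFile, byte[] data)
            throws Exception {
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.write(data); // readers never observe a half-written file under the final path
        }
        if (!fs.rename(tmpFile, finalFile)) {   // the "Committing ... as ..." step in the log
            throw new IllegalStateException("commit failed for " + finalFile);
        }
    }

    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Placeholder paths: .tmp/<family>/<file> is promoted to <family>/<file>.
        Path tmp = new Path("/tmp/region/.tmp/A/fc7e7c3916da46da97db55a3f1bfea4c");
        Path dst = new Path("/tmp/region/A/fc7e7c3916da46da97db55a3f1bfea4c");
        fs.mkdirs(dst.getParent());
        writeAndCommit(fs, tmp, dst, "example".getBytes(StandardCharsets.UTF_8));
    }
}
```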
2024-11-20T15:23:13,932 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:13,932 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/A, priority=12, startTime=1732116193827; duration=0sec 2024-11-20T15:23:13,932 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:13,932 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:A 2024-11-20T15:23:13,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742376_1552 (size=12151) 2024-11-20T15:23:13,945 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/a396fbba6d3f41febd6233b5c38ab8ce 2024-11-20T15:23:13,962 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#C#compaction#474 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:13,962 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/baa4076e6cf543f0a48b25a6d7cf23f9 is 50, key is test_row_0/C:col10/1732116193280/Put/seqid=0 2024-11-20T15:23:13,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/3bf21dbdf2504fd28bb0333f645dae86 is 50, key is test_row_0/B:col10/1732116193345/Put/seqid=0 2024-11-20T15:23:13,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:13,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
as already flushing 2024-11-20T15:23:13,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742377_1553 (size=12289) 2024-11-20T15:23:14,005 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/baa4076e6cf543f0a48b25a6d7cf23f9 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/baa4076e6cf543f0a48b25a6d7cf23f9 2024-11-20T15:23:14,010 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/C of 558f4ab8fec4b16865ece4459134acf9 into baa4076e6cf543f0a48b25a6d7cf23f9(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:14,010 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:14,010 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/C, priority=12, startTime=1732116193828; duration=0sec 2024-11-20T15:23:14,010 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:14,010 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:C 2024-11-20T15:23:14,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:14,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116253999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742378_1554 (size=12151) 2024-11-20T15:23:14,030 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/3bf21dbdf2504fd28bb0333f645dae86 2024-11-20T15:23:14,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:14,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116254021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:14,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116254021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T15:23:14,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/908c1a0578284130b539b637d751c631 is 50, key is test_row_0/C:col10/1732116193345/Put/seqid=0 2024-11-20T15:23:14,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742379_1555 (size=12151) 2024-11-20T15:23:14,102 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/908c1a0578284130b539b637d751c631 2024-11-20T15:23:14,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/a396fbba6d3f41febd6233b5c38ab8ce as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a396fbba6d3f41febd6233b5c38ab8ce 2024-11-20T15:23:14,111 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a396fbba6d3f41febd6233b5c38ab8ce, entries=150, sequenceid=245, filesize=11.9 K 2024-11-20T15:23:14,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/3bf21dbdf2504fd28bb0333f645dae86 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/3bf21dbdf2504fd28bb0333f645dae86 2024-11-20T15:23:14,116 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/3bf21dbdf2504fd28bb0333f645dae86, entries=150, sequenceid=245, filesize=11.9 K 2024-11-20T15:23:14,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/908c1a0578284130b539b637d751c631 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/908c1a0578284130b539b637d751c631 2024-11-20T15:23:14,119 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/908c1a0578284130b539b637d751c631, entries=150, sequenceid=245, filesize=11.9 K 2024-11-20T15:23:14,120 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 558f4ab8fec4b16865ece4459134acf9 in 224ms, sequenceid=245, compaction requested=false 2024-11-20T15:23:14,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:14,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
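[editor's note] Procedures 142, 144 and 146 in this section are client-requested table flushes: the "client.HBaseAdmin$TableFuture ... Operation: FLUSH ... completed" lines show the test thread waiting on each one. Below is a minimal sketch of issuing that kind of flush through the public Admin API; the connection setup is assumed, and it illustrates the request the test makes rather than reproducing the test's own code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Triggers a FlushTableProcedure on the master (pid=144/146 above), which in turn
            // schedules a FlushRegionProcedure per region; the call blocks until the flush completes.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```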
2024-11-20T15:23:14,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-11-20T15:23:14,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-11-20T15:23:14,123 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-20T15:23:14,124 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 380 msec 2024-11-20T15:23:14,125 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 384 msec 2024-11-20T15:23:14,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:14,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T15:23:14,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:14,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:14,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:14,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:14,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:14,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:14,133 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/d3f8cba5c69e41f3b789bb2cc391d9af is 50, key is test_row_0/A:col10/1732116194013/Put/seqid=0 2024-11-20T15:23:14,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742380_1556 (size=14641) 2024-11-20T15:23:14,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:14,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116254170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:14,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116254171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:14,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116254171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:14,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116254275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:14,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116254275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:14,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116254276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T15:23:14,345 INFO [Thread-2222 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-20T15:23:14,347 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:14,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-11-20T15:23:14,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T15:23:14,348 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:14,349 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:14,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:14,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T15:23:14,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:14,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116254479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:14,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116254480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:14,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116254481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,500 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,501 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-20T15:23:14,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:14,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:14,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:14,501 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:14,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:14,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:14,538 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/d3f8cba5c69e41f3b789bb2cc391d9af 2024-11-20T15:23:14,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/52d64d5c8f064607b3341fdaa0c6afd6 is 50, key is test_row_0/B:col10/1732116194013/Put/seqid=0 2024-11-20T15:23:14,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742381_1557 (size=12251) 2024-11-20T15:23:14,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T15:23:14,653 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-20T15:23:14,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:14,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
as already flushing 2024-11-20T15:23:14,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:14,654 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:14,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:14,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:14,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:14,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116254785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:14,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116254786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:14,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116254786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,806 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-20T15:23:14,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:14,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:14,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:14,807 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:14,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:14,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:14,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T15:23:14,952 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/52d64d5c8f064607b3341fdaa0c6afd6 2024-11-20T15:23:14,958 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/36a3535f0dd8413dbc23957c7ca042e9 is 50, key is test_row_0/C:col10/1732116194013/Put/seqid=0 2024-11-20T15:23:14,958 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:14,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-20T15:23:14,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:14,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:14,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:14,959 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:14,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:14,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:14,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742382_1558 (size=12251) 2024-11-20T15:23:15,111 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:15,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-20T15:23:15,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:15,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:15,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:15,112 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
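The records above show the two sides of write back-pressure during this test: RPC handlers keep rejecting Mutate calls with RegionTooBusyException ("Over memstore limit=512.0 K") while the flush of region 558f4ab8fec4b16865ece4459134acf9 is still running, and the remote flush procedure (pid=147) keeps failing with "Unable to complete flush ... as already flushing" until that flush completes. The 512.0 K blocking limit presumably reflects the test lowering hbase.hregion.memstore.flush.size, with the effective limit being that value times hbase.hregion.memstore.block.multiplier. As a hedged illustration only (not part of this test), the sketch below shows how a writer using the standard HBase client API might absorb this back-pressure; the table name matches the test, but the row, family, values and backoff numbers are hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));                         // hypothetical row
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                                                   // hypothetical starting backoff
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);  // may fail while the region's memstore is over its blocking limit
          break;
        } catch (IOException ioe) {
          // In the log above the underlying cause is RegionTooBusyException
          // ("Over memstore limit=512.0 K"); sleep and let the flush catch up.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5000);
        }
      }
    }
  }
}

Note that the HBase client generally retries retriable exceptions such as RegionTooBusyException internally (which is why the callIds and deadlines above keep advancing), so an explicit loop like this only matters once those built-in retries are exhausted.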
2024-11-20T15:23:15,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:15,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:15,264 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:15,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-20T15:23:15,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:15,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:15,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:15,265 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:15,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:15,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:15,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:15,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116255292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:15,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:15,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116255292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:15,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:15,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116255292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:15,364 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/36a3535f0dd8413dbc23957c7ca042e9 2024-11-20T15:23:15,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/d3f8cba5c69e41f3b789bb2cc391d9af as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d3f8cba5c69e41f3b789bb2cc391d9af 2024-11-20T15:23:15,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d3f8cba5c69e41f3b789bb2cc391d9af, entries=200, sequenceid=261, filesize=14.3 K 2024-11-20T15:23:15,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/52d64d5c8f064607b3341fdaa0c6afd6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/52d64d5c8f064607b3341fdaa0c6afd6 2024-11-20T15:23:15,375 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/52d64d5c8f064607b3341fdaa0c6afd6, entries=150, sequenceid=261, filesize=12.0 K 2024-11-20T15:23:15,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/36a3535f0dd8413dbc23957c7ca042e9 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/36a3535f0dd8413dbc23957c7ca042e9 2024-11-20T15:23:15,381 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/36a3535f0dd8413dbc23957c7ca042e9, entries=150, sequenceid=261, filesize=12.0 K 2024-11-20T15:23:15,382 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 558f4ab8fec4b16865ece4459134acf9 in 1253ms, sequenceid=261, compaction requested=true 2024-11-20T15:23:15,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:15,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:15,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:15,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:15,382 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:15,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:15,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:15,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:15,382 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:15,388 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36691 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:15,389 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39081 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:15,389 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/B is initiating minor compaction (all files) 2024-11-20T15:23:15,389 DEBUG 
[RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/A is initiating minor compaction (all files) 2024-11-20T15:23:15,389 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/B in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:15,389 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/A in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:15,389 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/a7ac53df52dc45f38521fabb30ecfafc, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/3bf21dbdf2504fd28bb0333f645dae86, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/52d64d5c8f064607b3341fdaa0c6afd6] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=35.8 K 2024-11-20T15:23:15,389 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/38821d700772465c8c41afa2a56e0ebc, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a396fbba6d3f41febd6233b5c38ab8ce, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d3f8cba5c69e41f3b789bb2cc391d9af] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=38.2 K 2024-11-20T15:23:15,389 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting a7ac53df52dc45f38521fabb30ecfafc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732116193276 2024-11-20T15:23:15,389 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38821d700772465c8c41afa2a56e0ebc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732116193276 2024-11-20T15:23:15,389 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bf21dbdf2504fd28bb0333f645dae86, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732116193325 2024-11-20T15:23:15,389 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting a396fbba6d3f41febd6233b5c38ab8ce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732116193325 2024-11-20T15:23:15,390 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 
52d64d5c8f064607b3341fdaa0c6afd6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732116193997 2024-11-20T15:23:15,390 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3f8cba5c69e41f3b789bb2cc391d9af, keycount=200, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732116193988 2024-11-20T15:23:15,396 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#B#compaction#481 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:15,396 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#A#compaction#480 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:15,396 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/234fbe14e68f4d989df16b95f660a389 is 50, key is test_row_0/B:col10/1732116194013/Put/seqid=0 2024-11-20T15:23:15,397 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/181ec45a3c5a420aa5230a16c964e335 is 50, key is test_row_0/A:col10/1732116194013/Put/seqid=0 2024-11-20T15:23:15,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742384_1560 (size=12491) 2024-11-20T15:23:15,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742383_1559 (size=12491) 2024-11-20T15:23:15,416 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:15,417 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-20T15:23:15,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
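The RSProcedureDispatcher/FlushRegionCallable entries above show the region server picking up the master-coordinated flush for pid=147. For reference, a table flush like this can be requested from client code through the Admin API; the sketch below is a minimal, self-contained illustration rather than part of the test itself (the table name comes from the log, the connection setup is assumed):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; on the server side this
      // shows up as FlushTableProcedure / FlushRegionProcedure work
      // (pid=146 / pid=147 in this log).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

The recurring "Checking to see if procedure is done pid=146" entries are the caller polling the master for completion of that flush procedure.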
2024-11-20T15:23:15,417 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T15:23:15,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:15,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:15,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:15,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:15,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:15,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:15,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/88514672817047b8b718289fdcd092a1 is 50, key is test_row_0/A:col10/1732116194170/Put/seqid=0 2024-11-20T15:23:15,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742385_1561 (size=12301) 2024-11-20T15:23:15,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T15:23:15,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:15,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:15,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:15,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116255720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:15,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:15,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116255721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:15,814 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/181ec45a3c5a420aa5230a16c964e335 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/181ec45a3c5a420aa5230a16c964e335 2024-11-20T15:23:15,818 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/234fbe14e68f4d989df16b95f660a389 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/234fbe14e68f4d989df16b95f660a389 2024-11-20T15:23:15,818 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/A of 558f4ab8fec4b16865ece4459134acf9 into 181ec45a3c5a420aa5230a16c964e335(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
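The "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" lines reflect the store-file counts that drive minor compaction selection: three flushed files make a store eligible, and at 16 files further flushes would be blocked. Those counts are commonly governed by hbase.hstore.compactionThreshold and hbase.hstore.blockingStoreFiles; the snippet below only reads the relevant settings and assumes the figures in this log correspond to the usual defaults:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSettingsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of store files before a minor compaction is considered
    // ("3 eligible" in the log matches the usual default of 3).
    int compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
    // Store-file count at which further flushes are blocked
    // ("16 blocking" in the log matches the usual default of 16).
    int blockingStoreFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compactionThreshold=" + compactionThreshold
        + ", blockingStoreFiles=" + blockingStoreFiles);
  }
}
```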
2024-11-20T15:23:15,819 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:15,819 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/A, priority=13, startTime=1732116195382; duration=0sec 2024-11-20T15:23:15,819 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:15,819 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:A 2024-11-20T15:23:15,819 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:15,820 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36691 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:15,820 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/C is initiating minor compaction (all files) 2024-11-20T15:23:15,820 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/C in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:15,820 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/baa4076e6cf543f0a48b25a6d7cf23f9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/908c1a0578284130b539b637d751c631, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/36a3535f0dd8413dbc23957c7ca042e9] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=35.8 K 2024-11-20T15:23:15,821 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting baa4076e6cf543f0a48b25a6d7cf23f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732116193276 2024-11-20T15:23:15,821 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 908c1a0578284130b539b637d751c631, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732116193325 2024-11-20T15:23:15,821 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36a3535f0dd8413dbc23957c7ca042e9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732116193997 2024-11-20T15:23:15,822 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/B of 558f4ab8fec4b16865ece4459134acf9 into 234fbe14e68f4d989df16b95f660a389(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:15,823 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:15,823 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/B, priority=13, startTime=1732116195382; duration=0sec 2024-11-20T15:23:15,823 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:15,823 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:B 2024-11-20T15:23:15,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:15,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116255826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:15,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:15,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116255826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:15,827 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#C#compaction#483 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:15,828 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/f1cae391da904884bc0991b7773c9aec is 50, key is test_row_0/C:col10/1732116194013/Put/seqid=0 2024-11-20T15:23:15,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742386_1562 (size=12491) 2024-11-20T15:23:15,835 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/88514672817047b8b718289fdcd092a1 2024-11-20T15:23:15,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/cbe5a57172b7478095fd57d88d4db874 is 50, key is test_row_0/B:col10/1732116194170/Put/seqid=0 2024-11-20T15:23:15,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742387_1563 (size=12301) 2024-11-20T15:23:16,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:16,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116256029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:16,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:16,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116256029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:16,235 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/f1cae391da904884bc0991b7773c9aec as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/f1cae391da904884bc0991b7773c9aec 2024-11-20T15:23:16,239 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/C of 558f4ab8fec4b16865ece4459134acf9 into f1cae391da904884bc0991b7773c9aec(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
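Throughout this stretch, client Mutate calls keep failing with "RegionTooBusyException: Over memstore limit=512.0 K". That limit is the per-region blocking threshold enforced in HRegion.checkResources, roughly the memstore flush size multiplied by the blocking multiplier; TestAcidGuarantees runs with a deliberately small flush size so that flushes, compactions, and blocked writes interleave. The arithmetic below is a sketch under the assumption of a 128 KB flush size and the common multiplier of 4, which reproduces the 512 K figure; the exact values used by the test are not shown in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical test-style settings: a 128 KB flush size and a multiplier of 4
    // give the 512 KB blocking limit reported by RegionTooBusyException above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;  // threshold checked by HRegion.checkResources
    System.out.println("blocking memstore limit = " + (blockingLimit / 1024) + " K");
  }
}
```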
2024-11-20T15:23:16,239 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:16,239 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/C, priority=13, startTime=1732116195382; duration=0sec 2024-11-20T15:23:16,239 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:16,239 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:C 2024-11-20T15:23:16,244 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/cbe5a57172b7478095fd57d88d4db874 2024-11-20T15:23:16,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/f4dd69e4e9324ccd82086471fa71c5e7 is 50, key is test_row_0/C:col10/1732116194170/Put/seqid=0 2024-11-20T15:23:16,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742388_1564 (size=12301) 2024-11-20T15:23:16,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:16,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116256299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:16,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:16,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116256303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:16,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:16,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116256304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:16,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:16,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116256333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:16,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:16,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116256334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:16,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T15:23:16,654 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/f4dd69e4e9324ccd82086471fa71c5e7 2024-11-20T15:23:16,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/88514672817047b8b718289fdcd092a1 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/88514672817047b8b718289fdcd092a1 2024-11-20T15:23:16,662 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/88514672817047b8b718289fdcd092a1, entries=150, sequenceid=284, filesize=12.0 K 2024-11-20T15:23:16,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/cbe5a57172b7478095fd57d88d4db874 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/cbe5a57172b7478095fd57d88d4db874 2024-11-20T15:23:16,665 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/cbe5a57172b7478095fd57d88d4db874, entries=150, sequenceid=284, filesize=12.0 K 2024-11-20T15:23:16,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/f4dd69e4e9324ccd82086471fa71c5e7 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/f4dd69e4e9324ccd82086471fa71c5e7 2024-11-20T15:23:16,669 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/f4dd69e4e9324ccd82086471fa71c5e7, entries=150, sequenceid=284, filesize=12.0 K 2024-11-20T15:23:16,670 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 558f4ab8fec4b16865ece4459134acf9 in 1253ms, sequenceid=284, compaction requested=false 2024-11-20T15:23:16,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:16,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
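The ClientService Mutate calls being rejected above originate from ordinary Put traffic against the three column families A, B and C. The sketch below shows the shape of such a write plus a simple manual retry on RegionTooBusyException; it is illustrative only. The row key, families and qualifier are taken from the flush messages (test_row_0, A/B/C, col10), the payload is made up, and in practice the stock HBase client already retries this exception internally (subject to hbase.client.retries.number and hbase.client.pause), possibly surfacing it wrapped in a retries-exhausted error.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BusyRegionWriteSketch {
  // Writes one row across the families seen in this log and backs off briefly
  // if the region reports it is over its memstore limit.
  static void writeRow(Connection conn) throws IOException, InterruptedException {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      }
      int attempts = 5;          // assumed retry budget
      long pauseMs = 200L;       // assumed backoff step
      for (int i = 0; i < attempts; i++) {
        try {
          table.put(put);        // arrives at the server as a ClientService Mutate call
          return;
        } catch (RegionTooBusyException e) {
          Thread.sleep(pauseMs * (i + 1));  // linear backoff before retrying
        }
      }
      throw new IOException("region still too busy after " + attempts + " attempts");
    }
  }
}
```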
2024-11-20T15:23:16,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-11-20T15:23:16,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-11-20T15:23:16,672 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-20T15:23:16,672 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3220 sec 2024-11-20T15:23:16,673 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 2.3250 sec 2024-11-20T15:23:16,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:16,842 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T15:23:16,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:16,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:16,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:16,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:16,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:16,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:16,866 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/7dce35b6caae40ae93c6fc300240c707 is 50, key is test_row_0/A:col10/1732116195717/Put/seqid=0 2024-11-20T15:23:16,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742389_1565 (size=14741) 2024-11-20T15:23:16,874 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/7dce35b6caae40ae93c6fc300240c707 2024-11-20T15:23:16,882 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/6f02ee10b9e94e8eac1b3d967060ef8f is 50, key is test_row_0/B:col10/1732116195717/Put/seqid=0 2024-11-20T15:23:16,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to 
blk_1073742390_1566 (size=12301) 2024-11-20T15:23:16,909 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/6f02ee10b9e94e8eac1b3d967060ef8f 2024-11-20T15:23:16,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/17426c75cb344f26aed014d74bad4d44 is 50, key is test_row_0/C:col10/1732116195717/Put/seqid=0 2024-11-20T15:23:16,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:16,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116256914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:16,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:16,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116256916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:16,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742391_1567 (size=12301) 2024-11-20T15:23:17,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:17,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116257023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:17,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:17,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116257024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:17,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:17,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116257227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:17,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:17,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116257229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:17,333 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/17426c75cb344f26aed014d74bad4d44 2024-11-20T15:23:17,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/7dce35b6caae40ae93c6fc300240c707 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/7dce35b6caae40ae93c6fc300240c707 2024-11-20T15:23:17,340 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/7dce35b6caae40ae93c6fc300240c707, entries=200, sequenceid=302, filesize=14.4 K 2024-11-20T15:23:17,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/6f02ee10b9e94e8eac1b3d967060ef8f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/6f02ee10b9e94e8eac1b3d967060ef8f 2024-11-20T15:23:17,344 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/6f02ee10b9e94e8eac1b3d967060ef8f, entries=150, sequenceid=302, filesize=12.0 K 2024-11-20T15:23:17,345 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/17426c75cb344f26aed014d74bad4d44 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/17426c75cb344f26aed014d74bad4d44 2024-11-20T15:23:17,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/17426c75cb344f26aed014d74bad4d44, entries=150, sequenceid=302, filesize=12.0 K 2024-11-20T15:23:17,354 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 558f4ab8fec4b16865ece4459134acf9 in 512ms, sequenceid=302, compaction requested=true 2024-11-20T15:23:17,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:17,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:17,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:17,354 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:17,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:17,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:17,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:17,354 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:17,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:17,355 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39533 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:17,355 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:17,355 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/A is initiating minor compaction (all files) 2024-11-20T15:23:17,355 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/B is initiating minor compaction (all files) 2024-11-20T15:23:17,355 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/A in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:17,355 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/B in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:17,355 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/181ec45a3c5a420aa5230a16c964e335, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/88514672817047b8b718289fdcd092a1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/7dce35b6caae40ae93c6fc300240c707] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=38.6 K 2024-11-20T15:23:17,355 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/234fbe14e68f4d989df16b95f660a389, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/cbe5a57172b7478095fd57d88d4db874, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/6f02ee10b9e94e8eac1b3d967060ef8f] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=36.2 K 2024-11-20T15:23:17,355 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 181ec45a3c5a420aa5230a16c964e335, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732116193997 2024-11-20T15:23:17,356 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 234fbe14e68f4d989df16b95f660a389, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732116193997 2024-11-20T15:23:17,356 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 88514672817047b8b718289fdcd092a1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732116194169 2024-11-20T15:23:17,356 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting cbe5a57172b7478095fd57d88d4db874, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732116194169 2024-11-20T15:23:17,357 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7dce35b6caae40ae93c6fc300240c707, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732116195717 2024-11-20T15:23:17,357 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f02ee10b9e94e8eac1b3d967060ef8f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732116195717 
2024-11-20T15:23:17,365 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#A#compaction#489 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:17,366 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/b6a1a0271b9b44239c698e16eef4ff47 is 50, key is test_row_0/A:col10/1732116195717/Put/seqid=0 2024-11-20T15:23:17,372 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#B#compaction#490 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:17,372 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/eb1fca155e5448acb65a8e9d6a377d62 is 50, key is test_row_0/B:col10/1732116195717/Put/seqid=0 2024-11-20T15:23:17,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742392_1568 (size=12643) 2024-11-20T15:23:17,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742393_1569 (size=12643) 2024-11-20T15:23:17,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:17,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T15:23:17,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:17,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:17,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:17,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:17,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:17,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:17,539 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/4f78b7c9fc9e4693872eb5f23e2f68ec is 50, key is test_row_0/A:col10/1732116197533/Put/seqid=0 2024-11-20T15:23:17,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742394_1570 (size=14741) 2024-11-20T15:23:17,568 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:17,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116257561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:17,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:17,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116257568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:17,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:17,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116257669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:17,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:17,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116257676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:17,785 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/b6a1a0271b9b44239c698e16eef4ff47 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/b6a1a0271b9b44239c698e16eef4ff47 2024-11-20T15:23:17,785 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/eb1fca155e5448acb65a8e9d6a377d62 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/eb1fca155e5448acb65a8e9d6a377d62 2024-11-20T15:23:17,797 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/B of 558f4ab8fec4b16865ece4459134acf9 into eb1fca155e5448acb65a8e9d6a377d62(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:17,797 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:17,797 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/A of 558f4ab8fec4b16865ece4459134acf9 into b6a1a0271b9b44239c698e16eef4ff47(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:17,798 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/B, priority=13, startTime=1732116197354; duration=0sec 2024-11-20T15:23:17,798 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:17,798 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/A, priority=13, startTime=1732116197354; duration=0sec 2024-11-20T15:23:17,798 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:17,798 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:B 2024-11-20T15:23:17,798 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:17,798 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:A 2024-11-20T15:23:17,798 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:17,798 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:17,798 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/C is initiating minor compaction (all files) 2024-11-20T15:23:17,799 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/C in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:17,799 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/f1cae391da904884bc0991b7773c9aec, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/f4dd69e4e9324ccd82086471fa71c5e7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/17426c75cb344f26aed014d74bad4d44] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=36.2 K 2024-11-20T15:23:17,799 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting f1cae391da904884bc0991b7773c9aec, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732116193997 2024-11-20T15:23:17,799 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting f4dd69e4e9324ccd82086471fa71c5e7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732116194169 2024-11-20T15:23:17,799 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 17426c75cb344f26aed014d74bad4d44, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732116195717 2024-11-20T15:23:17,805 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#C#compaction#492 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:17,805 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/188718a7414843dbabfe99bccd85ce0a is 50, key is test_row_0/C:col10/1732116195717/Put/seqid=0 2024-11-20T15:23:17,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742395_1571 (size=12643) 2024-11-20T15:23:17,812 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/188718a7414843dbabfe99bccd85ce0a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/188718a7414843dbabfe99bccd85ce0a 2024-11-20T15:23:17,816 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/C of 558f4ab8fec4b16865ece4459134acf9 into 188718a7414843dbabfe99bccd85ce0a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:17,816 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:17,816 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/C, priority=13, startTime=1732116197354; duration=0sec 2024-11-20T15:23:17,816 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:17,816 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:C 2024-11-20T15:23:17,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:17,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116257875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:17,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:17,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116257881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:17,943 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/4f78b7c9fc9e4693872eb5f23e2f68ec 2024-11-20T15:23:17,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/3f70e1e2090d4e089ea88882732ed144 is 50, key is test_row_0/B:col10/1732116197533/Put/seqid=0 2024-11-20T15:23:17,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742396_1572 (size=12301) 2024-11-20T15:23:18,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:18,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116258181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:18,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:18,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116258184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:18,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:18,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116258310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:18,312 DEBUG [Thread-2212 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., hostname=0b62285ead89,33387,1732116069954, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:23:18,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:18,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116258315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:18,318 DEBUG [Thread-2218 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4146 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., hostname=0b62285ead89,33387,1732116069954, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:23:18,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:18,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116258321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:18,324 DEBUG [Thread-2214 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4153 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., hostname=0b62285ead89,33387,1732116069954, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:23:18,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/3f70e1e2090d4e089ea88882732ed144 2024-11-20T15:23:18,360 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/29aca2294f4a4581b214883853200c6b is 50, key is test_row_0/C:col10/1732116197533/Put/seqid=0 2024-11-20T15:23:18,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742397_1573 (size=12301) 2024-11-20T15:23:18,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T15:23:18,453 INFO [Thread-2222 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-20T15:23:18,454 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:18,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-11-20T15:23:18,456 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:18,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T15:23:18,457 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:18,457 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:18,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T15:23:18,609 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:18,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-20T15:23:18,610 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:18,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:18,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:18,610 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:18,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:18,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:18,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:18,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116258687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:18,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:18,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116258691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:18,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T15:23:18,762 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:18,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-20T15:23:18,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:18,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:18,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:18,763 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:18,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:18,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/29aca2294f4a4581b214883853200c6b 2024-11-20T15:23:18,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:18,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/4f78b7c9fc9e4693872eb5f23e2f68ec as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/4f78b7c9fc9e4693872eb5f23e2f68ec 2024-11-20T15:23:18,771 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/4f78b7c9fc9e4693872eb5f23e2f68ec, entries=200, sequenceid=324, filesize=14.4 K 2024-11-20T15:23:18,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/3f70e1e2090d4e089ea88882732ed144 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/3f70e1e2090d4e089ea88882732ed144 2024-11-20T15:23:18,775 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/3f70e1e2090d4e089ea88882732ed144, entries=150, sequenceid=324, filesize=12.0 K 2024-11-20T15:23:18,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/29aca2294f4a4581b214883853200c6b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/29aca2294f4a4581b214883853200c6b 2024-11-20T15:23:18,787 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/29aca2294f4a4581b214883853200c6b, entries=150, sequenceid=324, filesize=12.0 K 2024-11-20T15:23:18,788 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 558f4ab8fec4b16865ece4459134acf9 in 1253ms, sequenceid=324, compaction requested=false 2024-11-20T15:23:18,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:18,915 DEBUG 
[RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:18,916 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-20T15:23:18,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:18,916 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T15:23:18,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:18,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:18,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:18,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:18,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:18,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:18,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/a77e469303d1449ab6e46d92e2710a15 is 50, key is test_row_0/A:col10/1732116197560/Put/seqid=0 2024-11-20T15:23:18,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742398_1574 (size=12301) 2024-11-20T15:23:18,926 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/a77e469303d1449ab6e46d92e2710a15 2024-11-20T15:23:18,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/1e9b5cfc159148f6991067f147a8c19f is 50, key is 
test_row_0/B:col10/1732116197560/Put/seqid=0 2024-11-20T15:23:18,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742399_1575 (size=12301) 2024-11-20T15:23:19,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T15:23:19,337 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/1e9b5cfc159148f6991067f147a8c19f 2024-11-20T15:23:19,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/497a3af68f4541a2a1d8ed1146f791fc is 50, key is test_row_0/C:col10/1732116197560/Put/seqid=0 2024-11-20T15:23:19,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742400_1576 (size=12301) 2024-11-20T15:23:19,349 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/497a3af68f4541a2a1d8ed1146f791fc 2024-11-20T15:23:19,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/a77e469303d1449ab6e46d92e2710a15 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a77e469303d1449ab6e46d92e2710a15 2024-11-20T15:23:19,361 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a77e469303d1449ab6e46d92e2710a15, entries=150, sequenceid=341, filesize=12.0 K 2024-11-20T15:23:19,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/1e9b5cfc159148f6991067f147a8c19f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/1e9b5cfc159148f6991067f147a8c19f 2024-11-20T15:23:19,366 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/1e9b5cfc159148f6991067f147a8c19f, entries=150, sequenceid=341, filesize=12.0 K 2024-11-20T15:23:19,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/497a3af68f4541a2a1d8ed1146f791fc as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/497a3af68f4541a2a1d8ed1146f791fc 2024-11-20T15:23:19,370 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/497a3af68f4541a2a1d8ed1146f791fc, entries=150, sequenceid=341, filesize=12.0 K 2024-11-20T15:23:19,371 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=0 B/0 for 558f4ab8fec4b16865ece4459134acf9 in 455ms, sequenceid=341, compaction requested=true 2024-11-20T15:23:19,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:19,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:19,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-20T15:23:19,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-11-20T15:23:19,373 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-20T15:23:19,373 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 915 msec 2024-11-20T15:23:19,374 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 920 msec 2024-11-20T15:23:19,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T15:23:19,560 INFO [Thread-2222 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-11-20T15:23:19,561 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:19,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees 2024-11-20T15:23:19,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T15:23:19,563 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:19,563 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:19,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:19,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T15:23:19,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:19,711 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:23:19,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:19,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:19,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:19,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:19,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:19,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:19,715 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:19,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-20T15:23:19,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:19,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:19,715 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/3cd13038ffc242ec8b262f9a81df9374 is 50, key is test_row_0/A:col10/1732116199711/Put/seqid=0 2024-11-20T15:23:19,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:19,715 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:19,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:19,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:19,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742401_1577 (size=19621) 2024-11-20T15:23:19,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:19,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116259765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:19,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:19,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116259769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:19,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T15:23:19,867 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:19,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-20T15:23:19,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:19,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:19,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:19,868 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:19,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:19,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:19,877 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:19,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116259871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:19,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:19,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116259874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:20,019 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:20,020 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-20T15:23:20,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:20,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:20,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:20,020 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:20,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:20,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:20,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:20,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116260080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:20,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:20,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116260080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:20,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/3cd13038ffc242ec8b262f9a81df9374 2024-11-20T15:23:20,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/64e48c174a04429e8dbd9cd0c098495d is 50, key is test_row_0/B:col10/1732116199711/Put/seqid=0 2024-11-20T15:23:20,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742402_1578 (size=12301) 2024-11-20T15:23:20,130 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/64e48c174a04429e8dbd9cd0c098495d 2024-11-20T15:23:20,136 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/9d1aeae5f1d747a8b46aee902bd18e1e is 50, key is test_row_0/C:col10/1732116199711/Put/seqid=0 2024-11-20T15:23:20,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742403_1579 (size=12301) 2024-11-20T15:23:20,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T15:23:20,172 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:20,173 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-20T15:23:20,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:20,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:20,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:20,173 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:20,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:20,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:20,325 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:20,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-20T15:23:20,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:20,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
as already flushing 2024-11-20T15:23:20,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:20,326 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:20,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:20,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:20,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:20,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116260382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:20,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:20,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116260385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:20,478 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:20,479 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-20T15:23:20,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:20,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:20,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:20,479 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:20,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:20,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:20,544 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/9d1aeae5f1d747a8b46aee902bd18e1e 2024-11-20T15:23:20,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/3cd13038ffc242ec8b262f9a81df9374 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/3cd13038ffc242ec8b262f9a81df9374 2024-11-20T15:23:20,551 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/3cd13038ffc242ec8b262f9a81df9374, entries=300, sequenceid=352, filesize=19.2 K 2024-11-20T15:23:20,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/64e48c174a04429e8dbd9cd0c098495d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/64e48c174a04429e8dbd9cd0c098495d 2024-11-20T15:23:20,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/64e48c174a04429e8dbd9cd0c098495d, entries=150, sequenceid=352, filesize=12.0 K 2024-11-20T15:23:20,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/9d1aeae5f1d747a8b46aee902bd18e1e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/9d1aeae5f1d747a8b46aee902bd18e1e 2024-11-20T15:23:20,559 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/9d1aeae5f1d747a8b46aee902bd18e1e, entries=150, sequenceid=352, filesize=12.0 K 2024-11-20T15:23:20,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 558f4ab8fec4b16865ece4459134acf9 in 848ms, sequenceid=352, compaction requested=true 2024-11-20T15:23:20,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:20,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:20,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:20,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:20,560 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:23:20,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:20,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:20,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:20,560 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:23:20,561 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 59306 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:23:20,561 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/A is initiating minor compaction (all files) 2024-11-20T15:23:20,561 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49546 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:23:20,561 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/B is initiating minor compaction (all files) 2024-11-20T15:23:20,561 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/A in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:20,561 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/B in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:20,561 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/b6a1a0271b9b44239c698e16eef4ff47, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/4f78b7c9fc9e4693872eb5f23e2f68ec, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a77e469303d1449ab6e46d92e2710a15, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/3cd13038ffc242ec8b262f9a81df9374] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=57.9 K 2024-11-20T15:23:20,561 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/eb1fca155e5448acb65a8e9d6a377d62, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/3f70e1e2090d4e089ea88882732ed144, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/1e9b5cfc159148f6991067f147a8c19f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/64e48c174a04429e8dbd9cd0c098495d] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=48.4 K 2024-11-20T15:23:20,561 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting eb1fca155e5448acb65a8e9d6a377d62, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732116195717 2024-11-20T15:23:20,561 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6a1a0271b9b44239c698e16eef4ff47, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732116195717 2024-11-20T15:23:20,562 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f70e1e2090d4e089ea88882732ed144, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1732116196869 2024-11-20T15:23:20,562 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f78b7c9fc9e4693872eb5f23e2f68ec, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1732116196869 2024-11-20T15:23:20,562 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting a77e469303d1449ab6e46d92e2710a15, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732116197555 2024-11-20T15:23:20,562 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 
1e9b5cfc159148f6991067f147a8c19f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732116197555 2024-11-20T15:23:20,564 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 64e48c174a04429e8dbd9cd0c098495d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732116199703 2024-11-20T15:23:20,564 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3cd13038ffc242ec8b262f9a81df9374, keycount=300, bloomtype=ROW, size=19.2 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732116199701 2024-11-20T15:23:20,571 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#B#compaction#501 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:20,572 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/c109e73aae9f4e3e90f1f69edc4c899b is 50, key is test_row_0/B:col10/1732116199711/Put/seqid=0 2024-11-20T15:23:20,572 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#A#compaction#502 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:20,573 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/461ccb5d406348819d710b220c0de879 is 50, key is test_row_0/A:col10/1732116199711/Put/seqid=0 2024-11-20T15:23:20,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742404_1580 (size=12779) 2024-11-20T15:23:20,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742405_1581 (size=12779) 2024-11-20T15:23:20,631 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:20,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-20T15:23:20,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:20,632 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T15:23:20,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:20,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:20,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:20,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:20,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:20,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:20,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/5b6367ac5ad4407fb280f6582d7302cb is 50, key is test_row_0/A:col10/1732116199752/Put/seqid=0 2024-11-20T15:23:20,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742406_1582 (size=12301) 2024-11-20T15:23:20,641 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/5b6367ac5ad4407fb280f6582d7302cb 2024-11-20T15:23:20,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/d4ea1e7b773a47c1b41185326b4c0aac is 50, key is test_row_0/B:col10/1732116199752/Put/seqid=0 2024-11-20T15:23:20,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742407_1583 (size=12301) 2024-11-20T15:23:20,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T15:23:20,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
as already flushing 2024-11-20T15:23:20,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:20,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:20,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116260914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:20,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:20,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116260915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:20,981 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/461ccb5d406348819d710b220c0de879 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/461ccb5d406348819d710b220c0de879 2024-11-20T15:23:20,981 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/c109e73aae9f4e3e90f1f69edc4c899b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/c109e73aae9f4e3e90f1f69edc4c899b 2024-11-20T15:23:20,984 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/A of 558f4ab8fec4b16865ece4459134acf9 into 461ccb5d406348819d710b220c0de879(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:20,984 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/B of 558f4ab8fec4b16865ece4459134acf9 into c109e73aae9f4e3e90f1f69edc4c899b(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
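The RegionTooBusyException warnings above come from HRegion.checkResources rejecting mutations while the region's memstore is over its 512.0 K blocking limit. The stock HBase client already treats this as a retriable error and backs off internally, so the following is only an illustrative sketch of making that handling explicit; the standard HBase 2.x client API is assumed, and the table, row, family and value names are hypothetical stand-ins for whatever the writers in this test use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPut {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);                    // the server may reject this with RegionTooBusyException
              break;                             // write accepted
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) throw e;         // give up after a few attempts
              Thread.sleep(backoffMs);           // let flushes/compactions drain the memstore
              backoffMs *= 2;                    // exponential backoff
            }
          }
        }
      }
    }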
2024-11-20T15:23:20,984 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:20,984 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:20,985 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/B, priority=12, startTime=1732116200560; duration=0sec 2024-11-20T15:23:20,985 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/A, priority=12, startTime=1732116200560; duration=0sec 2024-11-20T15:23:20,985 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:20,985 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:A 2024-11-20T15:23:20,985 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:20,985 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:B 2024-11-20T15:23:20,985 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:23:20,985 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49546 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:23:20,986 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/C is initiating minor compaction (all files) 2024-11-20T15:23:20,986 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/C in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:20,986 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/188718a7414843dbabfe99bccd85ce0a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/29aca2294f4a4581b214883853200c6b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/497a3af68f4541a2a1d8ed1146f791fc, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/9d1aeae5f1d747a8b46aee902bd18e1e] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=48.4 K 2024-11-20T15:23:20,986 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 188718a7414843dbabfe99bccd85ce0a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732116195717 2024-11-20T15:23:20,986 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29aca2294f4a4581b214883853200c6b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1732116196869 2024-11-20T15:23:20,986 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 497a3af68f4541a2a1d8ed1146f791fc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732116197555 2024-11-20T15:23:20,986 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d1aeae5f1d747a8b46aee902bd18e1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732116199703 2024-11-20T15:23:20,993 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#C#compaction#505 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:20,993 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/a3c86d0366c444a2a91c8494aa13f08c is 50, key is test_row_0/C:col10/1732116199711/Put/seqid=0 2024-11-20T15:23:20,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742408_1584 (size=12779) 2024-11-20T15:23:21,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:21,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116261020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:21,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:21,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116261021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:21,052 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/d4ea1e7b773a47c1b41185326b4c0aac 2024-11-20T15:23:21,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/3fa89a8b144c4bce83eaf5dd159a4f88 is 50, key is test_row_0/C:col10/1732116199752/Put/seqid=0 2024-11-20T15:23:21,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742409_1585 (size=12301) 2024-11-20T15:23:21,063 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/3fa89a8b144c4bce83eaf5dd159a4f88 2024-11-20T15:23:21,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/5b6367ac5ad4407fb280f6582d7302cb as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/5b6367ac5ad4407fb280f6582d7302cb 2024-11-20T15:23:21,070 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/5b6367ac5ad4407fb280f6582d7302cb, entries=150, sequenceid=377, filesize=12.0 K 2024-11-20T15:23:21,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/d4ea1e7b773a47c1b41185326b4c0aac as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/d4ea1e7b773a47c1b41185326b4c0aac 2024-11-20T15:23:21,074 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/d4ea1e7b773a47c1b41185326b4c0aac, entries=150, sequenceid=377, filesize=12.0 K 2024-11-20T15:23:21,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/3fa89a8b144c4bce83eaf5dd159a4f88 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/3fa89a8b144c4bce83eaf5dd159a4f88 2024-11-20T15:23:21,078 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/3fa89a8b144c4bce83eaf5dd159a4f88, entries=150, sequenceid=377, filesize=12.0 K 2024-11-20T15:23:21,078 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 558f4ab8fec4b16865ece4459134acf9 in 446ms, sequenceid=377, compaction requested=false 2024-11-20T15:23:21,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:21,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
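The 512.0 K figure quoted in those RegionTooBusyException messages is the per-region memstore blocking threshold, i.e. the region flush size multiplied by hbase.hregion.memstore.block.multiplier. A minimal sketch of the two standard settings involved follows; the values below are arbitrary examples for illustration, not what this test configures.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimits {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush threshold per region; updates are blocked once the memstore exceeds
        // flush.size * block.multiplier (the "memstore limit" quoted in the warnings).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // example: 128 MB
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);             // example: default multiplier
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit = " + blockingLimit + " bytes");
      }
    }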
2024-11-20T15:23:21,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=151 2024-11-20T15:23:21,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=151 2024-11-20T15:23:21,080 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-20T15:23:21,081 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5160 sec 2024-11-20T15:23:21,082 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees in 1.5200 sec 2024-11-20T15:23:21,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:21,229 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T15:23:21,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:21,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:21,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:21,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:21,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:21,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:21,233 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/d3675e12e65d469e8e3668961b6dc023 is 50, key is test_row_0/A:col10/1732116201228/Put/seqid=0 2024-11-20T15:23:21,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742410_1586 (size=17181) 2024-11-20T15:23:21,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:21,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116261280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:21,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:21,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116261282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:21,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:21,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116261386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:21,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:21,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116261390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:21,401 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/a3c86d0366c444a2a91c8494aa13f08c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/a3c86d0366c444a2a91c8494aa13f08c 2024-11-20T15:23:21,406 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/C of 558f4ab8fec4b16865ece4459134acf9 into a3c86d0366c444a2a91c8494aa13f08c(size=12.5 K), total size for store is 24.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:21,406 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:21,406 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/C, priority=12, startTime=1732116200560; duration=0sec 2024-11-20T15:23:21,406 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:21,406 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:C 2024-11-20T15:23:21,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:21,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116261590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:21,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:21,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116261596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:21,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/d3675e12e65d469e8e3668961b6dc023 2024-11-20T15:23:21,668 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/44203175a0034cd486e755a846ff5572 is 50, key is test_row_0/B:col10/1732116201228/Put/seqid=0 2024-11-20T15:23:21,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T15:23:21,669 INFO [Thread-2222 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-11-20T15:23:21,671 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:21,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees 2024-11-20T15:23:21,674 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:21,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T15:23:21,674 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:21,674 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:21,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742411_1587 (size=12301) 
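The FlushTableProcedure activity above (procId 150 reported complete to the jenkins client, then a new flush stored as pid=152) is the server-side half of an explicit table flush request. A minimal sketch of issuing such a request through the standard Admin API is shown below, purely for illustration, with the same assumed connection setup as in the earlier sketch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits the flush and waits for the master's FlushTableProcedure to finish,
          // which is what the "Operation: FLUSH ... procId: 150 completed" line reports.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }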
2024-11-20T15:23:21,706 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/44203175a0034cd486e755a846ff5572 2024-11-20T15:23:21,715 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/dcf43b256cd5417db04512cabe2c8d22 is 50, key is test_row_0/C:col10/1732116201228/Put/seqid=0 2024-11-20T15:23:21,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T15:23:21,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742412_1588 (size=12301) 2024-11-20T15:23:21,791 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/dcf43b256cd5417db04512cabe2c8d22 2024-11-20T15:23:21,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/d3675e12e65d469e8e3668961b6dc023 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d3675e12e65d469e8e3668961b6dc023 2024-11-20T15:23:21,803 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d3675e12e65d469e8e3668961b6dc023, entries=250, sequenceid=392, filesize=16.8 K 2024-11-20T15:23:21,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/44203175a0034cd486e755a846ff5572 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/44203175a0034cd486e755a846ff5572 2024-11-20T15:23:21,809 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/44203175a0034cd486e755a846ff5572, entries=150, sequenceid=392, filesize=12.0 K 2024-11-20T15:23:21,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/dcf43b256cd5417db04512cabe2c8d22 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/dcf43b256cd5417db04512cabe2c8d22 2024-11-20T15:23:21,817 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/dcf43b256cd5417db04512cabe2c8d22, entries=150, sequenceid=392, filesize=12.0 K 2024-11-20T15:23:21,818 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 558f4ab8fec4b16865ece4459134acf9 in 589ms, sequenceid=392, compaction requested=true 2024-11-20T15:23:21,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:21,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:21,818 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:21,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:21,819 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:21,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:21,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:21,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:21,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:21,823 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42261 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:21,823 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/A is initiating minor compaction (all files) 2024-11-20T15:23:21,823 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/A in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:21,823 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/461ccb5d406348819d710b220c0de879, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/5b6367ac5ad4407fb280f6582d7302cb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d3675e12e65d469e8e3668961b6dc023] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=41.3 K 2024-11-20T15:23:21,824 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 461ccb5d406348819d710b220c0de879, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732116199703 2024-11-20T15:23:21,825 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37381 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:21,825 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/B is initiating minor compaction (all files) 2024-11-20T15:23:21,825 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/B in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:21,825 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/c109e73aae9f4e3e90f1f69edc4c899b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/d4ea1e7b773a47c1b41185326b4c0aac, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/44203175a0034cd486e755a846ff5572] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=36.5 K 2024-11-20T15:23:21,825 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b6367ac5ad4407fb280f6582d7302cb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732116199752 2024-11-20T15:23:21,825 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3675e12e65d469e8e3668961b6dc023, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732116200908 2024-11-20T15:23:21,826 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting c109e73aae9f4e3e90f1f69edc4c899b, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732116199703 2024-11-20T15:23:21,827 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting d4ea1e7b773a47c1b41185326b4c0aac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732116199752 2024-11-20T15:23:21,827 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:21,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-11-20T15:23:21,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:21,828 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB
2024-11-20T15:23:21,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A
2024-11-20T15:23:21,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:23:21,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B
2024-11-20T15:23:21,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:23:21,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C
2024-11-20T15:23:21,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:23:21,829 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 44203175a0034cd486e755a846ff5572, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732116200908
2024-11-20T15:23:21,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/2199d03af1804ce29bdb890d2e5d713b is 50, key is test_row_0/A:col10/1732116201267/Put/seqid=0
2024-11-20T15:23:21,862 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#A#compaction#511 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T15:23:21,863 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/9e586412d1584c1ea3a56e798d197ceb is 50, key is test_row_0/A:col10/1732116201228/Put/seqid=0
2024-11-20T15:23:21,866 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#B#compaction#512 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:21,867 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/aca33a33ff3f4c4ca68ebae20fd30296 is 50, key is test_row_0/B:col10/1732116201228/Put/seqid=0 2024-11-20T15:23:21,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:21,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:21,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742413_1589 (size=12301) 2024-11-20T15:23:21,911 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/2199d03af1804ce29bdb890d2e5d713b 2024-11-20T15:23:21,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742414_1590 (size=12881) 2024-11-20T15:23:21,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/e7e0859b7df94e72bda81042ad42b5eb is 50, key is test_row_0/B:col10/1732116201267/Put/seqid=0 2024-11-20T15:23:21,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:21,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116261936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:21,944 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/9e586412d1584c1ea3a56e798d197ceb as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9e586412d1584c1ea3a56e798d197ceb 2024-11-20T15:23:21,952 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/A of 558f4ab8fec4b16865ece4459134acf9 into 9e586412d1584c1ea3a56e798d197ceb(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:21,952 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:21,952 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/A, priority=13, startTime=1732116201818; duration=0sec 2024-11-20T15:23:21,952 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:21,952 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:A 2024-11-20T15:23:21,952 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:21,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:21,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116261939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:21,954 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37381 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:21,954 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/C is initiating minor compaction (all files) 2024-11-20T15:23:21,955 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/C in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:21,955 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/a3c86d0366c444a2a91c8494aa13f08c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/3fa89a8b144c4bce83eaf5dd159a4f88, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/dcf43b256cd5417db04512cabe2c8d22] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=36.5 K 2024-11-20T15:23:21,955 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3c86d0366c444a2a91c8494aa13f08c, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732116199703 2024-11-20T15:23:21,956 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fa89a8b144c4bce83eaf5dd159a4f88, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732116199752 2024-11-20T15:23:21,957 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting dcf43b256cd5417db04512cabe2c8d22, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732116200908 2024-11-20T15:23:21,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742415_1591 (size=12881) 2024-11-20T15:23:21,975 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/aca33a33ff3f4c4ca68ebae20fd30296 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/aca33a33ff3f4c4ca68ebae20fd30296 2024-11-20T15:23:21,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T15:23:21,984 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#C#compaction#514 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:21,985 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/43422248e70e4f9b9768cf3390150773 is 50, key is test_row_0/C:col10/1732116201228/Put/seqid=0 2024-11-20T15:23:21,993 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/B of 558f4ab8fec4b16865ece4459134acf9 into aca33a33ff3f4c4ca68ebae20fd30296(size=12.6 K), total size for store is 12.6 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:21,993 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:21,993 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/B, priority=13, startTime=1732116201819; duration=0sec 2024-11-20T15:23:21,993 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:21,993 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:B 2024-11-20T15:23:22,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742416_1592 (size=12301) 2024-11-20T15:23:22,014 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/e7e0859b7df94e72bda81042ad42b5eb 2024-11-20T15:23:22,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742417_1593 (size=12881) 2024-11-20T15:23:22,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/b926ded073b3496eb61538118234f520 is 50, key is test_row_0/C:col10/1732116201267/Put/seqid=0 2024-11-20T15:23:22,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:22,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116262046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:22,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:22,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116262055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:22,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742418_1594 (size=12301) 2024-11-20T15:23:22,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:22,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116262255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:22,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T15:23:22,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116262263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954
2024-11-20T15:23:22,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152
2024-11-20T15:23:22,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:22,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41432 deadline: 1732116262346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:22,351 DEBUG [Thread-2214 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8181 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., hostname=0b62285ead89,33387,1732116069954, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:23:22,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:22,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41496 deadline: 1732116262351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:22,360 DEBUG [Thread-2212 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8189 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., hostname=0b62285ead89,33387,1732116069954, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:23:22,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:22,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41482 deadline: 1732116262357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:22,363 DEBUG [Thread-2218 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8192 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., hostname=0b62285ead89,33387,1732116069954, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:23:22,439 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/43422248e70e4f9b9768cf3390150773 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/43422248e70e4f9b9768cf3390150773 2024-11-20T15:23:22,444 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/C of 558f4ab8fec4b16865ece4459134acf9 into 43422248e70e4f9b9768cf3390150773(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:22,445 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:22,445 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/C, priority=13, startTime=1732116201819; duration=0sec 2024-11-20T15:23:22,445 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:22,445 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:C 2024-11-20T15:23:22,490 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/b926ded073b3496eb61538118234f520 2024-11-20T15:23:22,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/2199d03af1804ce29bdb890d2e5d713b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/2199d03af1804ce29bdb890d2e5d713b 2024-11-20T15:23:22,499 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/2199d03af1804ce29bdb890d2e5d713b, entries=150, sequenceid=416, filesize=12.0 K 2024-11-20T15:23:22,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/e7e0859b7df94e72bda81042ad42b5eb as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/e7e0859b7df94e72bda81042ad42b5eb 2024-11-20T15:23:22,505 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/e7e0859b7df94e72bda81042ad42b5eb, entries=150, sequenceid=416, filesize=12.0 K 2024-11-20T15:23:22,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/b926ded073b3496eb61538118234f520 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/b926ded073b3496eb61538118234f520 2024-11-20T15:23:22,510 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/b926ded073b3496eb61538118234f520, entries=150, sequenceid=416, filesize=12.0 K 2024-11-20T15:23:22,511 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 558f4ab8fec4b16865ece4459134acf9 in 683ms, sequenceid=416, compaction requested=false 2024-11-20T15:23:22,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:22,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
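Annotation: the flush completed above follows the usual two-step pattern visible in these lines: each store's memstore snapshot is first written to a file under .tmp and then committed into the store directory (A, B and C here), and the work runs under a FlushRegionProcedure (pid=153) dispatched by the master's FlushTableProcedure, as the following lines confirm. Below is a minimal sketch of requesting the same kind of table flush through the public Admin API; the configuration source is an assumption (whatever hbase-site.xml is on the classpath), only the table name TestAcidGuarantees is taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        // Assumes cluster connection settings come from hbase-site.xml on the classpath;
        // none of these values are taken from this log.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Asks the master to flush every region of the table; in this log that request
            // shows up as a FlushTableProcedure with a FlushRegionProcedure subprocedure.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}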
2024-11-20T15:23:22,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-11-20T15:23:22,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-11-20T15:23:22,522 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-20T15:23:22,522 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 844 msec 2024-11-20T15:23:22,525 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees in 852 msec 2024-11-20T15:23:22,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:22,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T15:23:22,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:22,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:22,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:22,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:22,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:22,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:22,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/85c50ded8bf9487f93b1e83c0937fd79 is 50, key is test_row_0/A:col10/1732116201930/Put/seqid=0 2024-11-20T15:23:22,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742419_1595 (size=14741) 2024-11-20T15:23:22,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:22,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116262646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:22,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:22,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116262647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:22,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:22,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116262756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:22,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:22,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116262756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:22,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T15:23:22,777 INFO [Thread-2222 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 152 completed 2024-11-20T15:23:22,779 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:22,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees 2024-11-20T15:23:22,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-20T15:23:22,780 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=154, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:22,781 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=154, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:22,781 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:22,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-20T15:23:22,932 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:22,933 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-11-20T15:23:22,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
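Annotation: the repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") mean the region's memstore has grown past its blocking limit, which in HBase is normally the per-region flush size multiplied by hbase.hregion.memstore.block.multiplier; writes are rejected until flushes bring usage back down. The sketch below only shows where those two knobs live on a Configuration. The specific values are illustrative assumptions; this excerpt does not show what the test actually configured, although a 512 K blocking limit is consistent with a deliberately small flush size.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSettings {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore size that triggers a flush (128 MB by default in stock HBase).
        // The 128 K value below is only an illustration of how a test might shrink it.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Writes are rejected with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier: 128 K * 4 = 512 K, matching the limit in this log.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long limit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("memstore blocking limit = " + limit + " bytes");
    }
}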
2024-11-20T15:23:22,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:22,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:22,933 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:22,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:22,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:22,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:22,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116262965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:22,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:22,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116262965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:23,024 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/85c50ded8bf9487f93b1e83c0937fd79 2024-11-20T15:23:23,045 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/0f76828d9cc846fea2cd9591f557fa99 is 50, key is test_row_0/B:col10/1732116201930/Put/seqid=0 2024-11-20T15:23:23,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742420_1596 (size=12301) 2024-11-20T15:23:23,062 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/0f76828d9cc846fea2cd9591f557fa99 2024-11-20T15:23:23,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/0bf4d90058d74d90bd33a982899657ae is 50, key is test_row_0/C:col10/1732116201930/Put/seqid=0 2024-11-20T15:23:23,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-20T15:23:23,087 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:23,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-11-20T15:23:23,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
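Annotation: the client-side stack earlier in this log (HTable.put via RpcRetryingCallerImpl.callWithRetries into AcidGuaranteesTestTool$AtomicityWriter.doAnAction) shows these RegionTooBusyException rejections reaching the test's writer threads. Below is a minimal, self-contained sketch of an equivalent writer; the retry settings and cell value are illustrative assumptions, the row/family/qualifier shape mirrors the test_row_0/A:col10 keys seen in this log, and how aggressively the client retries RegionTooBusyException internally depends on the client version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative client retry tuning: number of attempts and base pause (ms)
        // used by RpcRetryingCallerImpl before an exception is surfaced to the caller.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100L);
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Same shape of write as the test: one row, family A, qualifier col10.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            table.put(put); // retriable remote exceptions may be retried internally first
        }
    }
}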
2024-11-20T15:23:23,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:23,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:23,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:23,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:23,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:23,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742421_1597 (size=12301) 2024-11-20T15:23:23,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/0bf4d90058d74d90bd33a982899657ae 2024-11-20T15:23:23,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/85c50ded8bf9487f93b1e83c0937fd79 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/85c50ded8bf9487f93b1e83c0937fd79 2024-11-20T15:23:23,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/85c50ded8bf9487f93b1e83c0937fd79, entries=200, sequenceid=433, filesize=14.4 K 2024-11-20T15:23:23,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/0f76828d9cc846fea2cd9591f557fa99 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/0f76828d9cc846fea2cd9591f557fa99 2024-11-20T15:23:23,144 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/0f76828d9cc846fea2cd9591f557fa99, entries=150, sequenceid=433, filesize=12.0 K 2024-11-20T15:23:23,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/0bf4d90058d74d90bd33a982899657ae as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0bf4d90058d74d90bd33a982899657ae 2024-11-20T15:23:23,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0bf4d90058d74d90bd33a982899657ae, entries=150, sequenceid=433, filesize=12.0 K 2024-11-20T15:23:23,150 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 558f4ab8fec4b16865ece4459134acf9 in 583ms, sequenceid=433, compaction requested=true 2024-11-20T15:23:23,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:23,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:A, priority=-2147483648, current under compaction 
store size is 1 2024-11-20T15:23:23,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:23,151 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:23,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:23,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:23,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:23,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:23,151 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:23,152 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39923 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:23,152 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/A is initiating minor compaction (all files) 2024-11-20T15:23:23,152 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/A in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:23,152 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9e586412d1584c1ea3a56e798d197ceb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/2199d03af1804ce29bdb890d2e5d713b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/85c50ded8bf9487f93b1e83c0937fd79] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=39.0 K 2024-11-20T15:23:23,152 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37483 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:23,152 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e586412d1584c1ea3a56e798d197ceb, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732116200908 2024-11-20T15:23:23,152 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/B is initiating minor compaction (all files) 2024-11-20T15:23:23,152 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/B in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
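Annotation: the "Exploring compaction algorithm has selected 3 files" lines above refer to the selection step in which the policy looks for a set of store files whose sizes are close enough to be worth rewriting together. The sketch below is only a simplified illustration of that ratio test (the real ExploringCompactionPolicy also weighs file counts, off-peak ratios and total-size limits): each file in a candidate set must be no larger than the configured ratio times the combined size of the other candidates. The byte sizes are reconstructed approximations of the three A-store files reported above.

import java.util.List;

public class CompactionRatioCheck {
    // Simplified "files in ratio" test: every candidate file must be no larger than
    // ratio * (total size of the other candidates) for the set to be selected together.
    static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes of the three A-store files selected above
        // (reported as 12.6 K, 12.0 K and 14.4 K, totalSize 39923 bytes).
        List<Long> candidate = List.of(12_902L, 12_280L, 14_741L);
        double ratio = 1.2; // default hbase.hstore.compaction.ratio in stock HBase
        System.out.println("selected together? " + filesInRatio(candidate, ratio));
    }
}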
2024-11-20T15:23:23,152 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/aca33a33ff3f4c4ca68ebae20fd30296, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/e7e0859b7df94e72bda81042ad42b5eb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/0f76828d9cc846fea2cd9591f557fa99] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=36.6 K 2024-11-20T15:23:23,153 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2199d03af1804ce29bdb890d2e5d713b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732116201267 2024-11-20T15:23:23,153 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting aca33a33ff3f4c4ca68ebae20fd30296, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732116200908 2024-11-20T15:23:23,153 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85c50ded8bf9487f93b1e83c0937fd79, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732116201929 2024-11-20T15:23:23,153 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting e7e0859b7df94e72bda81042ad42b5eb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732116201267 2024-11-20T15:23:23,154 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f76828d9cc846fea2cd9591f557fa99, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732116201930 2024-11-20T15:23:23,173 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#A#compaction#519 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:23,174 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/d336e72cfd59436dacc7b57d491db460 is 50, key is test_row_0/A:col10/1732116201930/Put/seqid=0 2024-11-20T15:23:23,186 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#B#compaction#520 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:23,186 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/b949c64793ed4b1b9b33ea2dd30cf717 is 50, key is test_row_0/B:col10/1732116201930/Put/seqid=0 2024-11-20T15:23:23,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742422_1598 (size=12983) 2024-11-20T15:23:23,242 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:23,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-11-20T15:23:23,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:23,243 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T15:23:23,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:23,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:23,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:23,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:23,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:23,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:23,247 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/d336e72cfd59436dacc7b57d491db460 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d336e72cfd59436dacc7b57d491db460 2024-11-20T15:23:23,255 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/A of 558f4ab8fec4b16865ece4459134acf9 into 
d336e72cfd59436dacc7b57d491db460(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:23,255 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:23,255 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/A, priority=13, startTime=1732116203150; duration=0sec 2024-11-20T15:23:23,255 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:23,255 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:A 2024-11-20T15:23:23,255 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:23,258 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37483 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:23,258 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/C is initiating minor compaction (all files) 2024-11-20T15:23:23,258 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/C in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
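Annotation: at this point the three A-store files have been rewritten into a single ~12.7 K file and the per-store "under compaction" mark is cleared. The compactions in this log are system-requested (queued by MemStoreFlusher), but the same work can be requested explicitly through the Admin API; a minimal sketch is below, with the connection configuration assumed rather than taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompactionExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Queue a minor compaction for every region/store of the table; the region
            // servers decide which files to rewrite, as in the selection logged above.
            admin.compact(table);
            // Or force a major compaction that rewrites all store files:
            // admin.majorCompact(table);
        }
    }
}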
2024-11-20T15:23:23,258 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/43422248e70e4f9b9768cf3390150773, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/b926ded073b3496eb61538118234f520, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0bf4d90058d74d90bd33a982899657ae] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=36.6 K 2024-11-20T15:23:23,259 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43422248e70e4f9b9768cf3390150773, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732116200908 2024-11-20T15:23:23,259 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting b926ded073b3496eb61538118234f520, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732116201267 2024-11-20T15:23:23,260 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0bf4d90058d74d90bd33a982899657ae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732116201930 2024-11-20T15:23:23,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/7ab69ca82fd7478fa4af9db606cd0d7a is 50, key is test_row_0/A:col10/1732116202617/Put/seqid=0 2024-11-20T15:23:23,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742423_1599 (size=12983) 2024-11-20T15:23:23,268 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#C#compaction#522 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:23,268 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/e82bab4b828e4d5c85f56bcddc3ea242 is 50, key is test_row_0/C:col10/1732116201930/Put/seqid=0 2024-11-20T15:23:23,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:23,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
as already flushing 2024-11-20T15:23:23,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742425_1601 (size=12983) 2024-11-20T15:23:23,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742424_1600 (size=12301) 2024-11-20T15:23:23,319 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/7ab69ca82fd7478fa4af9db606cd0d7a 2024-11-20T15:23:23,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:23,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116263319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:23,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:23,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116263323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:23,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/a8c2e8edeaef4adc83a0592bc394328b is 50, key is test_row_0/B:col10/1732116202617/Put/seqid=0 2024-11-20T15:23:23,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742426_1602 (size=12301) 2024-11-20T15:23:23,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-20T15:23:23,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:23,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116263431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:23,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:23,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116263433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:23,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:23,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116263639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:23,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:23,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116263641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:23,670 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/b949c64793ed4b1b9b33ea2dd30cf717 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b949c64793ed4b1b9b33ea2dd30cf717 2024-11-20T15:23:23,679 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/B of 558f4ab8fec4b16865ece4459134acf9 into b949c64793ed4b1b9b33ea2dd30cf717(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:23,679 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:23,679 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/B, priority=13, startTime=1732116203151; duration=0sec 2024-11-20T15:23:23,679 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:23,679 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:B 2024-11-20T15:23:23,713 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/e82bab4b828e4d5c85f56bcddc3ea242 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/e82bab4b828e4d5c85f56bcddc3ea242 2024-11-20T15:23:23,719 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/C of 558f4ab8fec4b16865ece4459134acf9 into e82bab4b828e4d5c85f56bcddc3ea242(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:23,719 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:23,719 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/C, priority=13, startTime=1732116203151; duration=0sec 2024-11-20T15:23:23,719 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:23,719 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:C 2024-11-20T15:23:23,755 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/a8c2e8edeaef4adc83a0592bc394328b 2024-11-20T15:23:23,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/ca009649917243af9ad9c521c0a374d2 is 50, key is test_row_0/C:col10/1732116202617/Put/seqid=0 2024-11-20T15:23:23,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742427_1603 (size=12301) 2024-11-20T15:23:23,810 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/ca009649917243af9ad9c521c0a374d2 2024-11-20T15:23:23,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/7ab69ca82fd7478fa4af9db606cd0d7a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/7ab69ca82fd7478fa4af9db606cd0d7a 2024-11-20T15:23:23,826 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/7ab69ca82fd7478fa4af9db606cd0d7a, entries=150, sequenceid=455, filesize=12.0 K 2024-11-20T15:23:23,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/a8c2e8edeaef4adc83a0592bc394328b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/a8c2e8edeaef4adc83a0592bc394328b 2024-11-20T15:23:23,831 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/a8c2e8edeaef4adc83a0592bc394328b, entries=150, sequenceid=455, filesize=12.0 K 2024-11-20T15:23:23,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/ca009649917243af9ad9c521c0a374d2 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/ca009649917243af9ad9c521c0a374d2 2024-11-20T15:23:23,835 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/ca009649917243af9ad9c521c0a374d2, entries=150, sequenceid=455, filesize=12.0 K 2024-11-20T15:23:23,836 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 558f4ab8fec4b16865ece4459134acf9 in 593ms, sequenceid=455, compaction requested=false 2024-11-20T15:23:23,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:23,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
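The repeated "Over memstore limit=512.0 K" warnings above come from HRegion.checkResources(): once the region's memstore grows past its blocking limit, new mutations are rejected with RegionTooBusyException until the in-flight flush (the one that completes just above, ~127.47 KB in 593 ms) drains it. In a stock deployment that blocking limit is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the much smaller 512 K limit here suggests the test lowers the flush size. A minimal sketch, assuming only the standard Configuration API and the default property names, of how the limit can be derived:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
        public static void main(String[] args) {
            // Loads hbase-default.xml / hbase-site.xml from the classpath.
            Configuration conf = HBaseConfiguration.create();

            // Per-region memstore flush threshold (default 128 MB).
            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

            // Multiplier beyond which writes are rejected with RegionTooBusyException (default 4).
            long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);

            // The blocking limit that corresponds to the "Over memstore limit=..." warnings.
            long blockingLimit = flushSize * blockMultiplier;
            System.out.println("Region memstore blocking limit: " + blockingLimit + " bytes");
        }
    }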
2024-11-20T15:23:23,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=155 2024-11-20T15:23:23,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=155 2024-11-20T15:23:23,838 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-20T15:23:23,838 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0560 sec 2024-11-20T15:23:23,840 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees in 1.0600 sec 2024-11-20T15:23:23,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-20T15:23:23,883 INFO [Thread-2222 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-11-20T15:23:23,886 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:23,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=156, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees 2024-11-20T15:23:23,887 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=156, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:23,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-20T15:23:23,888 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=156, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:23,888 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:23,952 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T15:23:23,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:23,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:23,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:23,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:23,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 
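The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" entries show the test driving administrative flushes: each request becomes a FlushTableProcedure on the master (pid=154, then pid=156) with one FlushRegionProcedure subprocedure per region. A hedged sketch of how such a flush is requested through the public client API; the connection settings come from whatever configuration is on the classpath, not from values in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Asks the master to flush every region of the table; the master runs a
                // FlushTableProcedure like pid=154/156 above and reports completion back.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }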
2024-11-20T15:23:23,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:23,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:23,957 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/3b1a95e5e102431782f35673f0b6fce6 is 50, key is test_row_0/A:col10/1732116203949/Put/seqid=0 2024-11-20T15:23:23,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-20T15:23:23,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742428_1604 (size=12301) 2024-11-20T15:23:23,991 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=473 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/3b1a95e5e102431782f35673f0b6fce6 2024-11-20T15:23:23,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/d5911631a6384a218561d00daab28551 is 50, key is test_row_0/B:col10/1732116203949/Put/seqid=0 2024-11-20T15:23:24,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742429_1605 (size=12301) 2024-11-20T15:23:24,031 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=473 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/d5911631a6384a218561d00daab28551 2024-11-20T15:23:24,040 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:24,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-20T15:23:24,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:24,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/24093ff731ea42d1a059b4d4fb5c731b is 50, key is test_row_0/C:col10/1732116203949/Put/seqid=0 2024-11-20T15:23:24,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
as already flushing 2024-11-20T15:23:24,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:24,041 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:24,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:24,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:24,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:24,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116264041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:24,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:24,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116264043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:24,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742430_1606 (size=12301) 2024-11-20T15:23:24,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:24,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116264151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:24,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:24,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116264152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:24,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-20T15:23:24,193 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:24,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-20T15:23:24,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:24,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:24,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:24,194 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
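Two things interleave in this stretch: mutate RPCs being rejected server-side with RegionTooBusyException while the memstore sits over its 512 K blocking limit, and the FlushRegionCallable for pid=157 failing with "Unable to complete flush ... as already flushing" because a memstore flush is already running (the master simply redispatches the procedure until it succeeds). On the write path, callers are expected to back off and retry; the standard client does this internally (bounded by hbase.client.retries.number and its pause settings), but the behaviour can be made explicit. The sketch below is illustrative only: the table name matches the test, the retry count and backoff values are arbitrary, and depending on client settings the busy exception may surface directly or wrapped in another IOException.

    import java.io.IOException;

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class BusyRegionRetry {
        // Writes one Put, backing off whenever the region reports it is over its memstore limit.
        static void putWithBackoff(Connection connection, Put put)
                throws IOException, InterruptedException {
            try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                long pauseMs = 100;
                for (int attempt = 0; attempt < 10; attempt++) {
                    try {
                        table.put(put);
                        return;
                    } catch (IOException e) {
                        boolean busy = e instanceof RegionTooBusyException
                                || e.getCause() instanceof RegionTooBusyException;
                        if (!busy) {
                            throw e;
                        }
                        // Mirrors the "Over memstore limit" warnings above: wait for the flush
                        // to drain the memstore, then retry with exponential backoff.
                        Thread.sleep(pauseMs);
                        pauseMs = Math.min(pauseMs * 2, 5_000);
                    }
                }
                throw new IOException("Region stayed busy after 10 attempts");
            }
        }
    }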
2024-11-20T15:23:24,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:24,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:24,346 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:24,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-20T15:23:24,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:24,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:24,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:24,347 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:24,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:24,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:24,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116264358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:24,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:24,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116264358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:24,479 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=473 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/24093ff731ea42d1a059b4d4fb5c731b 2024-11-20T15:23:24,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/3b1a95e5e102431782f35673f0b6fce6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/3b1a95e5e102431782f35673f0b6fce6 2024-11-20T15:23:24,490 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/3b1a95e5e102431782f35673f0b6fce6, entries=150, sequenceid=473, filesize=12.0 K 2024-11-20T15:23:24,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-20T15:23:24,491 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/d5911631a6384a218561d00daab28551 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/d5911631a6384a218561d00daab28551 2024-11-20T15:23:24,494 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/d5911631a6384a218561d00daab28551, entries=150, sequenceid=473, filesize=12.0 K 2024-11-20T15:23:24,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/24093ff731ea42d1a059b4d4fb5c731b as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/24093ff731ea42d1a059b4d4fb5c731b 2024-11-20T15:23:24,498 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/24093ff731ea42d1a059b4d4fb5c731b, entries=150, sequenceid=473, filesize=12.0 K 2024-11-20T15:23:24,499 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:24,499 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 558f4ab8fec4b16865ece4459134acf9 in 547ms, sequenceid=473, compaction requested=true 2024-11-20T15:23:24,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:24,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:24,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-20T15:23:24,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:24,500 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:24,500 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:24,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
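Once the flush above completes with "compaction requested=true", both compaction threads run SortedCompactionPolicy over the stores and report "3 store files, 0 compacting, 3 eligible, 16 blocking". Those figures track the store-level thresholds: a minor compaction becomes eligible once hbase.hstore.compaction.min files accumulate (3 by default), at most hbase.hstore.compaction.max files are compacted at once, and further flushes are delayed once a store reaches hbase.hstore.blockingStoreFiles (16 by default, matching the "16 blocking" value here). A small sketch, assuming only the stock configuration keys, that reads those thresholds:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThresholds {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();

            // Minimum number of store files before a minor compaction is considered (default 3).
            int compactionMin = conf.getInt("hbase.hstore.compaction.min", 3);

            // Upper bound on the number of files compacted in one minor compaction (default 10).
            int compactionMax = conf.getInt("hbase.hstore.compaction.max", 10);

            // Store file count at which further flushes are delayed (default 16,
            // the "16 blocking" value reported by SortedCompactionPolicy above).
            int blockingFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);

            System.out.printf("min=%d max=%d blocking=%d%n", compactionMin, compactionMax, blockingFiles);
        }
    }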
2024-11-20T15:23:24,500 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T15:23:24,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:24,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:24,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:24,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:24,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:24,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:24,501 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:24,501 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/B is initiating minor compaction (all files) 2024-11-20T15:23:24,501 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/B in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:24,501 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b949c64793ed4b1b9b33ea2dd30cf717, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/a8c2e8edeaef4adc83a0592bc394328b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/d5911631a6384a218561d00daab28551] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=36.7 K 2024-11-20T15:23:24,501 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:24,501 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/A is initiating minor compaction (all files) 2024-11-20T15:23:24,501 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/A in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:24,501 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d336e72cfd59436dacc7b57d491db460, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/7ab69ca82fd7478fa4af9db606cd0d7a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/3b1a95e5e102431782f35673f0b6fce6] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=36.7 K 2024-11-20T15:23:24,502 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d336e72cfd59436dacc7b57d491db460, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732116201930 2024-11-20T15:23:24,502 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b949c64793ed4b1b9b33ea2dd30cf717, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732116201930 2024-11-20T15:23:24,502 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ab69ca82fd7478fa4af9db606cd0d7a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1732116202617 2024-11-20T15:23:24,503 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting a8c2e8edeaef4adc83a0592bc394328b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1732116202617 2024-11-20T15:23:24,503 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 3b1a95e5e102431782f35673f0b6fce6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=473, earliestPutTs=1732116203299 2024-11-20T15:23:24,503 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting d5911631a6384a218561d00daab28551, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=473, earliestPutTs=1732116203299 2024-11-20T15:23:24,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:24,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:24,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:24,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:24,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/78ed119f4ca64724b8efd80c383b4f33 is 50, key is test_row_0/A:col10/1732116204038/Put/seqid=0 2024-11-20T15:23:24,518 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#A#compaction#529 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:24,518 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/12e11e2b10f64b389e96e36f8845d2ae is 50, key is test_row_0/A:col10/1732116203949/Put/seqid=0 2024-11-20T15:23:24,537 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#B#compaction#530 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:24,538 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/857801944923488e927761c4ceafe0be is 50, key is test_row_0/B:col10/1732116203949/Put/seqid=0 2024-11-20T15:23:24,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742432_1608 (size=13085) 2024-11-20T15:23:24,560 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/12e11e2b10f64b389e96e36f8845d2ae as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/12e11e2b10f64b389e96e36f8845d2ae 2024-11-20T15:23:24,565 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/A of 558f4ab8fec4b16865ece4459134acf9 into 12e11e2b10f64b389e96e36f8845d2ae(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:24,565 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:24,565 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/A, priority=13, startTime=1732116204499; duration=0sec 2024-11-20T15:23:24,565 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:24,565 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:A 2024-11-20T15:23:24,565 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:24,569 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:24,569 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/C is initiating minor compaction (all files) 2024-11-20T15:23:24,569 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/C in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:24,569 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/e82bab4b828e4d5c85f56bcddc3ea242, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/ca009649917243af9ad9c521c0a374d2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/24093ff731ea42d1a059b4d4fb5c731b] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=36.7 K 2024-11-20T15:23:24,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742431_1607 (size=12301) 2024-11-20T15:23:24,569 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting e82bab4b828e4d5c85f56bcddc3ea242, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732116201930 2024-11-20T15:23:24,570 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca009649917243af9ad9c521c0a374d2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1732116202617 2024-11-20T15:23:24,570 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24093ff731ea42d1a059b4d4fb5c731b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=473, earliestPutTs=1732116203299 2024-11-20T15:23:24,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742433_1609 (size=13085) 2024-11-20T15:23:24,582 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#C#compaction#531 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:24,583 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/c0fcb871bcbe4cd5a812ce6f69a78a08 is 50, key is test_row_0/C:col10/1732116203949/Put/seqid=0 2024-11-20T15:23:24,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742434_1610 (size=13085) 2024-11-20T15:23:24,611 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/c0fcb871bcbe4cd5a812ce6f69a78a08 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/c0fcb871bcbe4cd5a812ce6f69a78a08 2024-11-20T15:23:24,618 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/C of 558f4ab8fec4b16865ece4459134acf9 into c0fcb871bcbe4cd5a812ce6f69a78a08(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:24,618 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:24,618 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/C, priority=13, startTime=1732116204505; duration=0sec 2024-11-20T15:23:24,618 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:24,618 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:C 2024-11-20T15:23:24,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:24,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:24,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:24,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116264708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:24,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:24,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116264711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:24,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:24,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116264816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:24,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:24,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116264820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:24,970 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/78ed119f4ca64724b8efd80c383b4f33 2024-11-20T15:23:24,984 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/857801944923488e927761c4ceafe0be as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/857801944923488e927761c4ceafe0be 2024-11-20T15:23:24,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/b93f258eddd2489d96da9c23e9484dd7 is 50, key is test_row_0/B:col10/1732116204038/Put/seqid=0 2024-11-20T15:23:24,991 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/B of 558f4ab8fec4b16865ece4459134acf9 into 857801944923488e927761c4ceafe0be(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
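The repeated "Region is too busy" warnings above are the server refusing Mutate calls while the memstore drains. On the client side this surfaces as a retryable IOException (RegionTooBusyException, possibly wrapped by the client's own retry machinery). A minimal write-with-backoff sketch, assuming the standard HBase 2.x client API; the row, family, qualifier and backoff values are illustrative and not taken from the test:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutWithBackoffSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                 // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                   // rejected while the region is over its memstore limit
          return;
        } catch (IOException busy) {        // e.g. RegionTooBusyException after client-side retries
          Thread.sleep(backoffMs);          // give the flush/compaction time to drain the memstore
          backoffMs *= 2;
        }
      }
      throw new IOException("region stayed too busy after 5 attempts");
    }
  }
}
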
2024-11-20T15:23:24,991 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:24,991 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/B, priority=13, startTime=1732116204500; duration=0sec 2024-11-20T15:23:24,991 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:24,991 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:B 2024-11-20T15:23:24,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-20T15:23:25,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:25,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116265021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:25,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:25,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116265026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:25,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742435_1611 (size=12301) 2024-11-20T15:23:25,037 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/b93f258eddd2489d96da9c23e9484dd7 2024-11-20T15:23:25,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/d5b016dd176b492eb59942b18c3fc86e is 50, key is test_row_0/C:col10/1732116204038/Put/seqid=0 2024-11-20T15:23:25,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742436_1612 (size=12301) 2024-11-20T15:23:25,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:25,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116265330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:25,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:25,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116265331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:25,530 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/d5b016dd176b492eb59942b18c3fc86e 2024-11-20T15:23:25,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/78ed119f4ca64724b8efd80c383b4f33 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/78ed119f4ca64724b8efd80c383b4f33 2024-11-20T15:23:25,539 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/78ed119f4ca64724b8efd80c383b4f33, entries=150, sequenceid=494, filesize=12.0 K 2024-11-20T15:23:25,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/b93f258eddd2489d96da9c23e9484dd7 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b93f258eddd2489d96da9c23e9484dd7 2024-11-20T15:23:25,549 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b93f258eddd2489d96da9c23e9484dd7, entries=150, sequenceid=494, filesize=12.0 K 2024-11-20T15:23:25,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/d5b016dd176b492eb59942b18c3fc86e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/d5b016dd176b492eb59942b18c3fc86e 2024-11-20T15:23:25,567 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/d5b016dd176b492eb59942b18c3fc86e, entries=150, sequenceid=494, filesize=12.0 K 2024-11-20T15:23:25,568 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 558f4ab8fec4b16865ece4459134acf9 in 1067ms, sequenceid=494, compaction requested=false 2024-11-20T15:23:25,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:25,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:25,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=157 2024-11-20T15:23:25,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=157 2024-11-20T15:23:25,571 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-11-20T15:23:25,571 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6810 sec 2024-11-20T15:23:25,573 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees in 1.6860 sec 2024-11-20T15:23:25,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T15:23:25,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:25,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:25,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:25,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:25,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:25,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:25,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:25,846 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/263177da1acf419481f4a6d3c4da701c is 50, key is test_row_0/A:col10/1732116204708/Put/seqid=0 2024-11-20T15:23:25,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742437_1613 (size=12301) 2024-11-20T15:23:25,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:25,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116265931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:25,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:25,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116265932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:25,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-20T15:23:25,993 INFO [Thread-2222 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 156 completed 2024-11-20T15:23:25,995 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:25,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=158, table=TestAcidGuarantees 2024-11-20T15:23:25,996 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=158, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=158, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:25,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-20T15:23:25,997 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=158, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=158, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:25,997 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:26,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:26,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116266040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:26,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:26,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116266040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:26,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-20T15:23:26,148 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:26,149 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=159 2024-11-20T15:23:26,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:26,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:26,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:26,149 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] handler.RSProcedureHandler(58): pid=159 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
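The FLUSH operations driven above (procIds 156 and 158) are admin-initiated table flushes; the pid=159 "Unable to complete flush" error only means the region was already mid-flush when the second request arrived, and the subprocedure is re-dispatched, as the later pid=159 entries show. A minimal sketch of issuing the same request, assuming the standard HBase 2.x Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master turns this request into a FlushTableProcedure with per-region
      // FlushRegionProcedure subprocedures, as in the pid=156..159 entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
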
2024-11-20T15:23:26,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=159 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:26,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=159 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:26,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:26,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116266245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:26,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:26,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116266248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:26,290 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=513 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/263177da1acf419481f4a6d3c4da701c 2024-11-20T15:23:26,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-20T15:23:26,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/875c16360a934cbc90ef464dc31ee68e is 50, key is test_row_0/B:col10/1732116204708/Put/seqid=0 2024-11-20T15:23:26,301 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:26,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=159 2024-11-20T15:23:26,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:26,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:26,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:26,302 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] handler.RSProcedureHandler(58): pid=159 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:26,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=159 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:26,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=159 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:26,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742438_1614 (size=12301) 2024-11-20T15:23:26,324 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=513 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/875c16360a934cbc90ef464dc31ee68e 2024-11-20T15:23:26,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/43d2e89ff43d44d8a39c1a388e46d2fc is 50, key is test_row_0/C:col10/1732116204708/Put/seqid=0 2024-11-20T15:23:26,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742439_1615 (size=12301) 2024-11-20T15:23:26,456 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:26,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=159 2024-11-20T15:23:26,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:26,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
as already flushing 2024-11-20T15:23:26,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:26,457 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] handler.RSProcedureHandler(58): pid=159 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:26,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=159 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:26,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=159 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:26,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:26,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41486 deadline: 1732116266552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:26,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:26,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41430 deadline: 1732116266554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:26,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-20T15:23:26,610 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:26,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=159 2024-11-20T15:23:26,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:26,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:26,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:26,611 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] handler.RSProcedureHandler(58): pid=159 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:26,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=159 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:26,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=159 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:26,766 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:26,766 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=159 2024-11-20T15:23:26,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:26,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. as already flushing 2024-11-20T15:23:26,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:26,766 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] handler.RSProcedureHandler(58): pid=159 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:26,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=159 java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:26,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=159 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
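For reference on the 512.0 K figure: HBase blocks writes to a region with RegionTooBusyException once its memstore reaches the per-region flush size (hbase.hregion.memstore.flush.size) multiplied by the blocking multiplier (hbase.hregion.memstore.block.multiplier); the HRegion.checkResources frame in the traces above is where that check happens. The log only shows the product (512 K), so the concrete values below (128 KB x 4) are an assumed decomposition for illustration, not the test's actual configuration; the property names themselves are standard HBase keys.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore size that triggers a flush (illustrative 128 KB;
        // the out-of-the-box default is far larger).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Writes are blocked once the memstore reaches flush.size * multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // 128 KB * 4 = 524288 bytes = 512.0 K, matching the "Over memstore limit" message.
        System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
    }
}
```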
2024-11-20T15:23:26,777 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=513 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/43d2e89ff43d44d8a39c1a388e46d2fc 2024-11-20T15:23:26,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/263177da1acf419481f4a6d3c4da701c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/263177da1acf419481f4a6d3c4da701c 2024-11-20T15:23:26,794 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/263177da1acf419481f4a6d3c4da701c, entries=150, sequenceid=513, filesize=12.0 K 2024-11-20T15:23:26,795 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/875c16360a934cbc90ef464dc31ee68e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/875c16360a934cbc90ef464dc31ee68e 2024-11-20T15:23:26,799 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/875c16360a934cbc90ef464dc31ee68e, entries=150, sequenceid=513, filesize=12.0 K 2024-11-20T15:23:26,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/43d2e89ff43d44d8a39c1a388e46d2fc as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/43d2e89ff43d44d8a39c1a388e46d2fc 2024-11-20T15:23:26,806 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/43d2e89ff43d44d8a39c1a388e46d2fc, entries=150, sequenceid=513, filesize=12.0 K 2024-11-20T15:23:26,807 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 558f4ab8fec4b16865ece4459134acf9 in 968ms, sequenceid=513, compaction requested=true 2024-11-20T15:23:26,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:26,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:26,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:26,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:26,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:26,807 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:26,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 558f4ab8fec4b16865ece4459134acf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:26,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T15:23:26,808 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:26,811 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:26,811 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/A is initiating minor compaction (all files) 2024-11-20T15:23:26,811 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/A in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:26,811 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/12e11e2b10f64b389e96e36f8845d2ae, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/78ed119f4ca64724b8efd80c383b4f33, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/263177da1acf419481f4a6d3c4da701c] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=36.8 K 2024-11-20T15:23:26,811 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12e11e2b10f64b389e96e36f8845d2ae, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=473, earliestPutTs=1732116203299 2024-11-20T15:23:26,812 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78ed119f4ca64724b8efd80c383b4f33, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1732116204028 2024-11-20T15:23:26,813 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 263177da1acf419481f4a6d3c4da701c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=513, earliestPutTs=1732116204698 2024-11-20T15:23:26,814 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:26,814 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/B is initiating minor compaction (all files) 2024-11-20T15:23:26,814 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/B in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:26,814 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/857801944923488e927761c4ceafe0be, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b93f258eddd2489d96da9c23e9484dd7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/875c16360a934cbc90ef464dc31ee68e] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=36.8 K 2024-11-20T15:23:26,815 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 857801944923488e927761c4ceafe0be, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=473, earliestPutTs=1732116203299 2024-11-20T15:23:26,815 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b93f258eddd2489d96da9c23e9484dd7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1732116204028 2024-11-20T15:23:26,817 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 875c16360a934cbc90ef464dc31ee68e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=513, earliestPutTs=1732116204698 2024-11-20T15:23:26,824 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#A#compaction#537 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:26,825 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/9f7f3ebea6b24d2388f71e8d0b763da8 is 50, key is test_row_0/A:col10/1732116204708/Put/seqid=0 2024-11-20T15:23:26,835 DEBUG [Thread-2225 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6050584c to 127.0.0.1:62338 2024-11-20T15:23:26,835 DEBUG [Thread-2225 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:26,837 DEBUG [Thread-2227 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6dd48863 to 127.0.0.1:62338 2024-11-20T15:23:26,837 DEBUG [Thread-2227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:26,838 DEBUG [Thread-2223 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2070263a to 127.0.0.1:62338 2024-11-20T15:23:26,838 DEBUG [Thread-2223 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:26,842 DEBUG [Thread-2229 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51196534 to 127.0.0.1:62338 2024-11-20T15:23:26,842 DEBUG [Thread-2229 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:26,843 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#B#compaction#538 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:26,843 DEBUG [Thread-2231 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1dc5e114 to 127.0.0.1:62338 2024-11-20T15:23:26,843 DEBUG [Thread-2231 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:26,843 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/a44efe24f37845748ba79a94bf1cab43 is 50, key is test_row_0/B:col10/1732116204708/Put/seqid=0 2024-11-20T15:23:26,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742440_1616 (size=13187) 2024-11-20T15:23:26,851 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/9f7f3ebea6b24d2388f71e8d0b763da8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9f7f3ebea6b24d2388f71e8d0b763da8 2024-11-20T15:23:26,856 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/A of 558f4ab8fec4b16865ece4459134acf9 into 9f7f3ebea6b24d2388f71e8d0b763da8(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:26,856 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:26,856 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/A, priority=13, startTime=1732116206807; duration=0sec 2024-11-20T15:23:26,856 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:26,856 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:A 2024-11-20T15:23:26,856 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:26,858 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:26,858 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): 558f4ab8fec4b16865ece4459134acf9/C is initiating minor compaction (all files) 2024-11-20T15:23:26,858 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 558f4ab8fec4b16865ece4459134acf9/C in TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
2024-11-20T15:23:26,858 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/c0fcb871bcbe4cd5a812ce6f69a78a08, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/d5b016dd176b492eb59942b18c3fc86e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/43d2e89ff43d44d8a39c1a388e46d2fc] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp, totalSize=36.8 K 2024-11-20T15:23:26,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742441_1617 (size=13187) 2024-11-20T15:23:26,858 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0fcb871bcbe4cd5a812ce6f69a78a08, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=473, earliestPutTs=1732116203299 2024-11-20T15:23:26,859 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d5b016dd176b492eb59942b18c3fc86e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1732116204028 2024-11-20T15:23:26,860 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43d2e89ff43d44d8a39c1a388e46d2fc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=513, earliestPutTs=1732116204698 2024-11-20T15:23:26,868 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/a44efe24f37845748ba79a94bf1cab43 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/a44efe24f37845748ba79a94bf1cab43 2024-11-20T15:23:26,878 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 558f4ab8fec4b16865ece4459134acf9#C#compaction#539 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:26,879 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/e6e80886ff2f4ecab8635b6d373d7f9b is 50, key is test_row_0/C:col10/1732116204708/Put/seqid=0 2024-11-20T15:23:26,880 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/B of 558f4ab8fec4b16865ece4459134acf9 into a44efe24f37845748ba79a94bf1cab43(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
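On the compaction activity above: each flush left a third ~12 K HFile per store, so the ExploringCompactionPolicy selected all three eligible files (totalSize=36.8 K) and rewrote them into one ~12.9 K file per store. These compactions were queued by MemStoreFlusher itself; for completeness, a hypothetical sketch of requesting one explicitly through the standard Admin API is shown below (the class name and the choice of family are illustrative, not something this test does).

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Queue a minor compaction of one column family; the region server handles it
            // on the same short/long compaction pools seen in the log above.
            admin.compact(table, Bytes.toBytes("A"));
            // Or rewrite every store file of the table in a major compaction.
            admin.majorCompact(table);
        }
    }
}
```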
2024-11-20T15:23:26,880 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:26,880 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/B, priority=13, startTime=1732116206807; duration=0sec 2024-11-20T15:23:26,880 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:26,880 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:B 2024-11-20T15:23:26,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742442_1618 (size=13187) 2024-11-20T15:23:26,918 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:26,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=159 2024-11-20T15:23:26,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:26,919 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T15:23:26,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:26,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:26,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:26,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:26,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:26,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:26,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/606f45627aae40769c3c9fb9bcbbc664 is 50, key is test_row_0/A:col10/1732116205930/Put/seqid=0 2024-11-20T15:23:26,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742443_1619 (size=12301) 2024-11-20T15:23:26,952 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/606f45627aae40769c3c9fb9bcbbc664 2024-11-20T15:23:26,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/6c75f10872204802920ff789775c8a6a is 50, key is test_row_0/B:col10/1732116205930/Put/seqid=0 2024-11-20T15:23:26,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742444_1620 (size=12301) 2024-11-20T15:23:26,970 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/6c75f10872204802920ff789775c8a6a 2024-11-20T15:23:26,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/5ae105b133364c5c9599dbd30338263c is 50, key is test_row_0/C:col10/1732116205930/Put/seqid=0 2024-11-20T15:23:27,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742445_1621 (size=12301) 2024-11-20T15:23:27,010 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/5ae105b133364c5c9599dbd30338263c 2024-11-20T15:23:27,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/606f45627aae40769c3c9fb9bcbbc664 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/606f45627aae40769c3c9fb9bcbbc664 2024-11-20T15:23:27,017 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=159}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/606f45627aae40769c3c9fb9bcbbc664, entries=150, sequenceid=535, filesize=12.0 K 2024-11-20T15:23:27,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/6c75f10872204802920ff789775c8a6a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/6c75f10872204802920ff789775c8a6a 2024-11-20T15:23:27,027 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/6c75f10872204802920ff789775c8a6a, entries=150, sequenceid=535, filesize=12.0 K 2024-11-20T15:23:27,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/5ae105b133364c5c9599dbd30338263c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/5ae105b133364c5c9599dbd30338263c 2024-11-20T15:23:27,033 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/5ae105b133364c5c9599dbd30338263c, entries=150, sequenceid=535, filesize=12.0 K 2024-11-20T15:23:27,034 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=0 B/0 for 558f4ab8fec4b16865ece4459134acf9 in 115ms, sequenceid=535, compaction requested=false 2024-11-20T15:23:27,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(2538): Flush status journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:27,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 
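The flush above writes each column family's memstore to a temporary HFile under the region's .tmp/ directory and then commits it into the family directory (A, B, C). For illustration only, the sketch below shows what such a commit amounts to at the filesystem level, using the plain Hadoop FileSystem API with the NameNode address and region path taken from the log; this is a minimal sketch, not the actual HRegionFileSystem/HStore code.

```java
// Illustrative only: what "Committing .tmp/A/<file> as A/<file>" amounts to at the
// filesystem level. Hadoop FileSystem API sketch, not the HBase implementation;
// the NameNode address and region path below are copied from the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitFlushedFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:40559");   // NameNode from the log
    FileSystem fs = FileSystem.get(conf);

    Path region = new Path("/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/"
        + "data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9");
    Path tmpFile = new Path(region, ".tmp/A/606f45627aae40769c3c9fb9bcbbc664");
    Path storeFile = new Path(region, "A/606f45627aae40769c3c9fb9bcbbc664");

    // Committing a flushed HFile is essentially a rename from the region's .tmp area
    // into the column-family directory, after which the new store file is live.
    boolean committed = fs.rename(tmpFile, storeFile);
    System.out.println("committed=" + committed);
  }
}
```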
2024-11-20T15:23:27,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=159
2024-11-20T15:23:27,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=159
2024-11-20T15:23:27,037 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158
2024-11-20T15:23:27,037 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0380 sec
2024-11-20T15:23:27,038 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=158, table=TestAcidGuarantees in 1.0430 sec
2024-11-20T15:23:27,061 DEBUG [Thread-2216 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5871c039 to 127.0.0.1:62338
2024-11-20T15:23:27,061 DEBUG [Thread-2216 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T15:23:27,064 DEBUG [Thread-2220 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b7f20c4 to 127.0.0.1:62338
2024-11-20T15:23:27,065 DEBUG [Thread-2220 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T15:23:27,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158
2024-11-20T15:23:27,100 INFO [Thread-2222 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 158 completed
2024-11-20T15:23:27,307 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/e6e80886ff2f4ecab8635b6d373d7f9b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/e6e80886ff2f4ecab8635b6d373d7f9b
2024-11-20T15:23:27,312 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 558f4ab8fec4b16865ece4459134acf9/C of 558f4ab8fec4b16865ece4459134acf9 into e6e80886ff2f4ecab8635b6d373d7f9b(size=12.9 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute.
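The FlushTableProcedure (pid=158) and its per-region FlushRegionProcedure (pid=159) above were driven by a client flush request, which the "Operation: FLUSH ... procId: 158 completed" line shows returning. Below is a minimal client-side sketch of such a request; the connection settings are assumptions for a locally reachable cluster, and only the table name comes from the log.

```java
// Minimal sketch of the client side of the flush seen above (pid=158/159):
// Admin.flush(TableName) asks the master to run a flush procedure for the table
// and returns once it completes. Quorum setting is an assumption, not from the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");   // assumed local cluster
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));  // blocks until the flush finishes
    }
  }
}
```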
2024-11-20T15:23:27,312 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 558f4ab8fec4b16865ece4459134acf9:
2024-11-20T15:23:27,312 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9., storeName=558f4ab8fec4b16865ece4459134acf9/C, priority=13, startTime=1732116206807; duration=0sec
2024-11-20T15:23:27,312 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T15:23:27,312 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 558f4ab8fec4b16865ece4459134acf9:C
2024-11-20T15:23:32,387 DEBUG [Thread-2218 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7daa5922 to 127.0.0.1:62338
2024-11-20T15:23:32,387 DEBUG [Thread-2218 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T15:23:32,396 DEBUG [Thread-2214 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d0ab200 to 127.0.0.1:62338
2024-11-20T15:23:32,396 DEBUG [Thread-2214 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T15:23:32,418 DEBUG [Thread-2212 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65df2359 to 127.0.0.1:62338
2024-11-20T15:23:32,418 DEBUG [Thread-2212 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 108
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 116
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2091
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6273 rows
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2118
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6354 rows
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2103
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6309 rows
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2101
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6303 rows
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2102
2024-11-20T15:23:32,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6306 rows
2024-11-20T15:23:32,418 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
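Each scanner in the summary above reports a "verified" count exactly three times its "scanned" count (for example 3 × 2091 = 6273), which is consistent with one check per column family A, B and C. The sketch below illustrates that kind of whole-row consistency check with the public client API; it is not AcidGuaranteesTestTool's own code, and the column qualifier used is an assumption.

```java
// Illustrative consistency check in the spirit of the scanner summary above: scan the
// table and verify that each row carries the same value in its A, B and C families.
// Sketch only; family names come from the log, the "col10" qualifier is an assumption.
import java.util.Arrays;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowConsistencySketch {
  public static void main(String[] args) throws Exception {
    byte[][] families = { Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") };
    long scanned = 0, verified = 0;
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"));
         ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result row : scanner) {
        scanned++;
        byte[] reference = null;
        for (byte[] family : families) {
          byte[] value = row.getValue(family, Bytes.toBytes("col10")); // qualifier is hypothetical
          if (reference == null) {
            reference = value;
          } else if (!Arrays.equals(reference, value)) {
            throw new IllegalStateException("ACID violation at row " + Bytes.toString(row.getRow()));
          }
          verified++;  // one verification per family, hence verified == 3 * scanned
        }
      }
    }
    System.out.println("scanned " + scanned + ", verified " + verified + " rows");
  }
}
```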
2024-11-20T15:23:32,419 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x63cefe40 to 127.0.0.1:62338
2024-11-20T15:23:32,419 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T15:23:32,421 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-20T15:23:32,422 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-20T15:23:32,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=160, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-20T15:23:32,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160
2024-11-20T15:23:32,426 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116212426"}]},"ts":"1732116212426"}
2024-11-20T15:23:32,428 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-20T15:23:32,430 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-20T15:23:32,431 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-20T15:23:32,432 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=558f4ab8fec4b16865ece4459134acf9, UNASSIGN}]
2024-11-20T15:23:32,433 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=558f4ab8fec4b16865ece4459134acf9, UNASSIGN
2024-11-20T15:23:32,433 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=162 updating hbase:meta row=558f4ab8fec4b16865ece4459134acf9, regionState=CLOSING, regionLocation=0b62285ead89,33387,1732116069954
2024-11-20T15:23:32,434 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-11-20T15:23:32,434 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE; CloseRegionProcedure 558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954}]
2024-11-20T15:23:32,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160
2024-11-20T15:23:32,585 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954
2024-11-20T15:23:32,586 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(124): Close 558f4ab8fec4b16865ece4459134acf9
2024-11-20T15:23:32,586 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
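The DisableTableProcedure chain above (pid=160 through pid=163) starts from a client "disable TestAcidGuarantees" request: the master marks the table DISABLING in hbase:meta and then unassigns its region. A minimal client-side sketch, under the same local-cluster assumption as the earlier examples:

```java
// Client-side counterpart of the DisableTableProcedure chain above (pid=160..163):
// Admin.disableTable() asks the master to disable the table and waits for completion.
// Sketch only; only the table name is taken from the log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.disableTable(table);                             // blocks until the regions are closed
      System.out.println("disabled=" + admin.isTableDisabled(table));
    }
  }
}
```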
2024-11-20T15:23:32,586 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1681): Closing 558f4ab8fec4b16865ece4459134acf9, disabling compactions & flushes 2024-11-20T15:23:32,586 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:32,586 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:32,586 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. after waiting 0 ms 2024-11-20T15:23:32,586 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:32,586 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(2837): Flushing 558f4ab8fec4b16865ece4459134acf9 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T15:23:32,586 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=A 2024-11-20T15:23:32,587 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:32,587 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=B 2024-11-20T15:23:32,587 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:32,587 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 558f4ab8fec4b16865ece4459134acf9, store=C 2024-11-20T15:23:32,587 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:32,591 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/77e05aa1275d4cd19c583106fe79adcf is 50, key is test_row_0/A:col10/1732116212386/Put/seqid=0 2024-11-20T15:23:32,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742446_1622 (size=12301) 2024-11-20T15:23:32,595 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=544 (bloomFilter=true), 
to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/77e05aa1275d4cd19c583106fe79adcf 2024-11-20T15:23:32,602 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/d65625f4aa744e8cbb70cb1c076f5161 is 50, key is test_row_0/B:col10/1732116212386/Put/seqid=0 2024-11-20T15:23:32,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742447_1623 (size=12301) 2024-11-20T15:23:32,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-11-20T15:23:33,006 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=544 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/d65625f4aa744e8cbb70cb1c076f5161 2024-11-20T15:23:33,012 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/4162c402229b462ca1b775b4a38960b6 is 50, key is test_row_0/C:col10/1732116212386/Put/seqid=0 2024-11-20T15:23:33,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742448_1624 (size=12301) 2024-11-20T15:23:33,018 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=544 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/4162c402229b462ca1b775b4a38960b6 2024-11-20T15:23:33,022 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/A/77e05aa1275d4cd19c583106fe79adcf as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/77e05aa1275d4cd19c583106fe79adcf 2024-11-20T15:23:33,025 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/77e05aa1275d4cd19c583106fe79adcf, entries=150, sequenceid=544, filesize=12.0 K 2024-11-20T15:23:33,026 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/B/d65625f4aa744e8cbb70cb1c076f5161 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/d65625f4aa744e8cbb70cb1c076f5161 2024-11-20T15:23:33,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-11-20T15:23:33,030 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/d65625f4aa744e8cbb70cb1c076f5161, entries=150, sequenceid=544, filesize=12.0 K 2024-11-20T15:23:33,030 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/.tmp/C/4162c402229b462ca1b775b4a38960b6 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/4162c402229b462ca1b775b4a38960b6 2024-11-20T15:23:33,034 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/4162c402229b462ca1b775b4a38960b6, entries=150, sequenceid=544, filesize=12.0 K 2024-11-20T15:23:33,034 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 558f4ab8fec4b16865ece4459134acf9 in 448ms, sequenceid=544, compaction requested=true 2024-11-20T15:23:33,035 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/4613c92e5c564cfb9a9295524a463072, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/6fbf314cb42e4cbc892db2462d643dfd, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9588c4dba8d84c748103c11f853714e4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/fc7e7c3916da46da97db55a3f1bfea4c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/38821d700772465c8c41afa2a56e0ebc, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a396fbba6d3f41febd6233b5c38ab8ce, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d3f8cba5c69e41f3b789bb2cc391d9af, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/181ec45a3c5a420aa5230a16c964e335, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/88514672817047b8b718289fdcd092a1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/7dce35b6caae40ae93c6fc300240c707, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/b6a1a0271b9b44239c698e16eef4ff47, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/4f78b7c9fc9e4693872eb5f23e2f68ec, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a77e469303d1449ab6e46d92e2710a15, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/3cd13038ffc242ec8b262f9a81df9374, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/461ccb5d406348819d710b220c0de879, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/5b6367ac5ad4407fb280f6582d7302cb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d3675e12e65d469e8e3668961b6dc023, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9e586412d1584c1ea3a56e798d197ceb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/2199d03af1804ce29bdb890d2e5d713b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/85c50ded8bf9487f93b1e83c0937fd79, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d336e72cfd59436dacc7b57d491db460, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/7ab69ca82fd7478fa4af9db606cd0d7a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/12e11e2b10f64b389e96e36f8845d2ae, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/3b1a95e5e102431782f35673f0b6fce6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/78ed119f4ca64724b8efd80c383b4f33, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/263177da1acf419481f4a6d3c4da701c] to archive 2024-11-20T15:23:33,036 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T15:23:33,037 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/4613c92e5c564cfb9a9295524a463072 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/4613c92e5c564cfb9a9295524a463072 2024-11-20T15:23:33,039 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/6fbf314cb42e4cbc892db2462d643dfd to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/6fbf314cb42e4cbc892db2462d643dfd 2024-11-20T15:23:33,040 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9588c4dba8d84c748103c11f853714e4 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9588c4dba8d84c748103c11f853714e4 2024-11-20T15:23:33,041 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/fc7e7c3916da46da97db55a3f1bfea4c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/fc7e7c3916da46da97db55a3f1bfea4c 2024-11-20T15:23:33,042 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/38821d700772465c8c41afa2a56e0ebc to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/38821d700772465c8c41afa2a56e0ebc 2024-11-20T15:23:33,043 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a396fbba6d3f41febd6233b5c38ab8ce to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a396fbba6d3f41febd6233b5c38ab8ce 2024-11-20T15:23:33,044 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d3f8cba5c69e41f3b789bb2cc391d9af to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d3f8cba5c69e41f3b789bb2cc391d9af 2024-11-20T15:23:33,045 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/181ec45a3c5a420aa5230a16c964e335 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/181ec45a3c5a420aa5230a16c964e335 2024-11-20T15:23:33,047 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/88514672817047b8b718289fdcd092a1 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/88514672817047b8b718289fdcd092a1 2024-11-20T15:23:33,048 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/7dce35b6caae40ae93c6fc300240c707 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/7dce35b6caae40ae93c6fc300240c707 2024-11-20T15:23:33,049 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/b6a1a0271b9b44239c698e16eef4ff47 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/b6a1a0271b9b44239c698e16eef4ff47 2024-11-20T15:23:33,050 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/4f78b7c9fc9e4693872eb5f23e2f68ec to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/4f78b7c9fc9e4693872eb5f23e2f68ec 2024-11-20T15:23:33,052 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a77e469303d1449ab6e46d92e2710a15 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/a77e469303d1449ab6e46d92e2710a15 2024-11-20T15:23:33,053 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/3cd13038ffc242ec8b262f9a81df9374 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/3cd13038ffc242ec8b262f9a81df9374 2024-11-20T15:23:33,054 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/461ccb5d406348819d710b220c0de879 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/461ccb5d406348819d710b220c0de879 2024-11-20T15:23:33,055 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/5b6367ac5ad4407fb280f6582d7302cb to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/5b6367ac5ad4407fb280f6582d7302cb 2024-11-20T15:23:33,056 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d3675e12e65d469e8e3668961b6dc023 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d3675e12e65d469e8e3668961b6dc023 2024-11-20T15:23:33,058 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9e586412d1584c1ea3a56e798d197ceb to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9e586412d1584c1ea3a56e798d197ceb 2024-11-20T15:23:33,059 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/2199d03af1804ce29bdb890d2e5d713b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/2199d03af1804ce29bdb890d2e5d713b 2024-11-20T15:23:33,060 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/85c50ded8bf9487f93b1e83c0937fd79 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/85c50ded8bf9487f93b1e83c0937fd79 2024-11-20T15:23:33,061 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d336e72cfd59436dacc7b57d491db460 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/d336e72cfd59436dacc7b57d491db460 2024-11-20T15:23:33,062 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/7ab69ca82fd7478fa4af9db606cd0d7a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/7ab69ca82fd7478fa4af9db606cd0d7a 2024-11-20T15:23:33,063 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/12e11e2b10f64b389e96e36f8845d2ae to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/12e11e2b10f64b389e96e36f8845d2ae 2024-11-20T15:23:33,066 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/3b1a95e5e102431782f35673f0b6fce6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/3b1a95e5e102431782f35673f0b6fce6 2024-11-20T15:23:33,067 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/78ed119f4ca64724b8efd80c383b4f33 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/78ed119f4ca64724b8efd80c383b4f33 2024-11-20T15:23:33,069 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/263177da1acf419481f4a6d3c4da701c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/263177da1acf419481f4a6d3c4da701c 2024-11-20T15:23:33,070 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/952d5736c8234a4792fa18c7acc71df3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/4d0dfc64b146484695c8957dba0dd546, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/8e1f9d158c0b45c996f5b0d6e24876dc, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/a7ac53df52dc45f38521fabb30ecfafc, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/e3c967bd6baa4a09b15efa02ccbd7b8d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/3bf21dbdf2504fd28bb0333f645dae86, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/234fbe14e68f4d989df16b95f660a389, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/52d64d5c8f064607b3341fdaa0c6afd6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/cbe5a57172b7478095fd57d88d4db874, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/eb1fca155e5448acb65a8e9d6a377d62, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/6f02ee10b9e94e8eac1b3d967060ef8f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/3f70e1e2090d4e089ea88882732ed144, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/1e9b5cfc159148f6991067f147a8c19f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/c109e73aae9f4e3e90f1f69edc4c899b, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/64e48c174a04429e8dbd9cd0c098495d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/d4ea1e7b773a47c1b41185326b4c0aac, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/aca33a33ff3f4c4ca68ebae20fd30296, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/44203175a0034cd486e755a846ff5572, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/e7e0859b7df94e72bda81042ad42b5eb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b949c64793ed4b1b9b33ea2dd30cf717, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/0f76828d9cc846fea2cd9591f557fa99, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/a8c2e8edeaef4adc83a0592bc394328b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/857801944923488e927761c4ceafe0be, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/d5911631a6384a218561d00daab28551, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b93f258eddd2489d96da9c23e9484dd7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/875c16360a934cbc90ef464dc31ee68e] to archive 2024-11-20T15:23:33,071 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
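The HFileArchiver entries in this section move compacted store files out of the table's data directory into a mirrored path under archive/, preserving the table/region/family layout. The sketch below reproduces that path construction and move with the plain Hadoop FileSystem API; it is an illustration, not the backup.HFileArchiver implementation, with the root directory and file name taken from the log.

```java
// Sketch of the data/ -> archive/ move logged by HFileArchiver above: the archive path
// mirrors the store file's path relative to the HBase root. Hadoop FileSystem API only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveStoreFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:40559");      // NameNode from the log
    FileSystem fs = FileSystem.get(conf);

    Path root = new Path("/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3");
    String relative = "default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/"
        + "B/952d5736c8234a4792fa18c7acc71df3";

    Path source = new Path(root, "data/" + relative);           // live store file
    Path target = new Path(root, "archive/data/" + relative);   // same layout under archive/

    fs.mkdirs(target.getParent());                              // ensure archive/<...>/B exists
    boolean archived = fs.rename(source, target);
    System.out.println("archived=" + archived);
  }
}
```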
2024-11-20T15:23:33,074 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/952d5736c8234a4792fa18c7acc71df3 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/952d5736c8234a4792fa18c7acc71df3 2024-11-20T15:23:33,077 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/4d0dfc64b146484695c8957dba0dd546 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/4d0dfc64b146484695c8957dba0dd546 2024-11-20T15:23:33,078 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/8e1f9d158c0b45c996f5b0d6e24876dc to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/8e1f9d158c0b45c996f5b0d6e24876dc 2024-11-20T15:23:33,079 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/a7ac53df52dc45f38521fabb30ecfafc to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/a7ac53df52dc45f38521fabb30ecfafc 2024-11-20T15:23:33,081 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/e3c967bd6baa4a09b15efa02ccbd7b8d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/e3c967bd6baa4a09b15efa02ccbd7b8d 2024-11-20T15:23:33,083 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/3bf21dbdf2504fd28bb0333f645dae86 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/3bf21dbdf2504fd28bb0333f645dae86 2024-11-20T15:23:33,084 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/234fbe14e68f4d989df16b95f660a389 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/234fbe14e68f4d989df16b95f660a389 2024-11-20T15:23:33,085 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/52d64d5c8f064607b3341fdaa0c6afd6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/52d64d5c8f064607b3341fdaa0c6afd6 2024-11-20T15:23:33,086 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/cbe5a57172b7478095fd57d88d4db874 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/cbe5a57172b7478095fd57d88d4db874 2024-11-20T15:23:33,087 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/eb1fca155e5448acb65a8e9d6a377d62 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/eb1fca155e5448acb65a8e9d6a377d62 2024-11-20T15:23:33,088 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/6f02ee10b9e94e8eac1b3d967060ef8f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/6f02ee10b9e94e8eac1b3d967060ef8f 2024-11-20T15:23:33,089 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/3f70e1e2090d4e089ea88882732ed144 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/3f70e1e2090d4e089ea88882732ed144 2024-11-20T15:23:33,090 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/1e9b5cfc159148f6991067f147a8c19f to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/1e9b5cfc159148f6991067f147a8c19f 2024-11-20T15:23:33,091 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/c109e73aae9f4e3e90f1f69edc4c899b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/c109e73aae9f4e3e90f1f69edc4c899b 2024-11-20T15:23:33,092 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/64e48c174a04429e8dbd9cd0c098495d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/64e48c174a04429e8dbd9cd0c098495d 2024-11-20T15:23:33,093 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/d4ea1e7b773a47c1b41185326b4c0aac to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/d4ea1e7b773a47c1b41185326b4c0aac 2024-11-20T15:23:33,094 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/aca33a33ff3f4c4ca68ebae20fd30296 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/aca33a33ff3f4c4ca68ebae20fd30296 2024-11-20T15:23:33,095 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/44203175a0034cd486e755a846ff5572 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/44203175a0034cd486e755a846ff5572 2024-11-20T15:23:33,096 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/e7e0859b7df94e72bda81042ad42b5eb to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/e7e0859b7df94e72bda81042ad42b5eb 2024-11-20T15:23:33,097 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b949c64793ed4b1b9b33ea2dd30cf717 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b949c64793ed4b1b9b33ea2dd30cf717 2024-11-20T15:23:33,098 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/0f76828d9cc846fea2cd9591f557fa99 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/0f76828d9cc846fea2cd9591f557fa99 2024-11-20T15:23:33,100 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/a8c2e8edeaef4adc83a0592bc394328b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/a8c2e8edeaef4adc83a0592bc394328b 2024-11-20T15:23:33,101 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/857801944923488e927761c4ceafe0be to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/857801944923488e927761c4ceafe0be 2024-11-20T15:23:33,102 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/d5911631a6384a218561d00daab28551 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/d5911631a6384a218561d00daab28551 2024-11-20T15:23:33,103 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b93f258eddd2489d96da9c23e9484dd7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/b93f258eddd2489d96da9c23e9484dd7 2024-11-20T15:23:33,104 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/875c16360a934cbc90ef464dc31ee68e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/875c16360a934cbc90ef464dc31ee68e 2024-11-20T15:23:33,105 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6d0621d8fdb6422db25f07d5972c99e6, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0f412ed1f46349c7a0fd610c5dc69d15, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0721bbed1f764d5782980e58a17fc77e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/baa4076e6cf543f0a48b25a6d7cf23f9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/885839d709a7406598df265dc1b3a4ca, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/908c1a0578284130b539b637d751c631, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/f1cae391da904884bc0991b7773c9aec, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/36a3535f0dd8413dbc23957c7ca042e9, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/f4dd69e4e9324ccd82086471fa71c5e7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/188718a7414843dbabfe99bccd85ce0a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/17426c75cb344f26aed014d74bad4d44, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/29aca2294f4a4581b214883853200c6b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/497a3af68f4541a2a1d8ed1146f791fc, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/a3c86d0366c444a2a91c8494aa13f08c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/9d1aeae5f1d747a8b46aee902bd18e1e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/3fa89a8b144c4bce83eaf5dd159a4f88, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/43422248e70e4f9b9768cf3390150773, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/dcf43b256cd5417db04512cabe2c8d22, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/b926ded073b3496eb61538118234f520, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/e82bab4b828e4d5c85f56bcddc3ea242, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0bf4d90058d74d90bd33a982899657ae, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/ca009649917243af9ad9c521c0a374d2, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/c0fcb871bcbe4cd5a812ce6f69a78a08, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/24093ff731ea42d1a059b4d4fb5c731b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/d5b016dd176b492eb59942b18c3fc86e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/43d2e89ff43d44d8a39c1a388e46d2fc] to archive 2024-11-20T15:23:33,107 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
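[Note] The HStore(2316) and HFileArchiver(360/596) entries above and below record the store closer moving every compacted store file from the region's data directory to the mirrored path under .../archive/ instead of deleting it outright. The snippet below is only a rough sketch of that move-to-archive idea using the Hadoop FileSystem API with a hypothetical helper and paths; it is not the actual HFileArchiver implementation.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveSketch {
      // Illustrative only: mirror a store file from <root>/data/... to <root>/archive/data/...
      // Assumes storeFile lives under rootDir, as in the paths logged above.
      public static void archiveStoreFile(Configuration conf, Path rootDir, Path storeFile)
          throws IOException {
        FileSystem fs = storeFile.getFileSystem(conf);
        String relative = storeFile.toUri().getPath()
            .substring(rootDir.toUri().getPath().length())
            .replaceFirst("^/", "");                       // e.g. data/default/<table>/<region>/<cf>/<file>
        Path archived = new Path(new Path(rootDir, "archive"), relative);
        fs.mkdirs(archived.getParent());                   // ensure archive/<...>/ exists
        if (!fs.rename(storeFile, archived)) {             // move, do not copy + delete
          throw new IOException("Failed to archive " + storeFile);
        }
      }
    }
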
2024-11-20T15:23:33,108 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6d0621d8fdb6422db25f07d5972c99e6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/6d0621d8fdb6422db25f07d5972c99e6 2024-11-20T15:23:33,109 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0f412ed1f46349c7a0fd610c5dc69d15 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0f412ed1f46349c7a0fd610c5dc69d15 2024-11-20T15:23:33,110 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0721bbed1f764d5782980e58a17fc77e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0721bbed1f764d5782980e58a17fc77e 2024-11-20T15:23:33,111 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/baa4076e6cf543f0a48b25a6d7cf23f9 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/baa4076e6cf543f0a48b25a6d7cf23f9 2024-11-20T15:23:33,112 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/885839d709a7406598df265dc1b3a4ca to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/885839d709a7406598df265dc1b3a4ca 2024-11-20T15:23:33,113 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/908c1a0578284130b539b637d751c631 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/908c1a0578284130b539b637d751c631 2024-11-20T15:23:33,114 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/f1cae391da904884bc0991b7773c9aec to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/f1cae391da904884bc0991b7773c9aec 2024-11-20T15:23:33,115 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/36a3535f0dd8413dbc23957c7ca042e9 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/36a3535f0dd8413dbc23957c7ca042e9 2024-11-20T15:23:33,116 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/f4dd69e4e9324ccd82086471fa71c5e7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/f4dd69e4e9324ccd82086471fa71c5e7 2024-11-20T15:23:33,117 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/188718a7414843dbabfe99bccd85ce0a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/188718a7414843dbabfe99bccd85ce0a 2024-11-20T15:23:33,118 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/17426c75cb344f26aed014d74bad4d44 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/17426c75cb344f26aed014d74bad4d44 2024-11-20T15:23:33,119 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/29aca2294f4a4581b214883853200c6b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/29aca2294f4a4581b214883853200c6b 2024-11-20T15:23:33,120 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/497a3af68f4541a2a1d8ed1146f791fc to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/497a3af68f4541a2a1d8ed1146f791fc 2024-11-20T15:23:33,121 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/a3c86d0366c444a2a91c8494aa13f08c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/a3c86d0366c444a2a91c8494aa13f08c 2024-11-20T15:23:33,122 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/9d1aeae5f1d747a8b46aee902bd18e1e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/9d1aeae5f1d747a8b46aee902bd18e1e 2024-11-20T15:23:33,123 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/3fa89a8b144c4bce83eaf5dd159a4f88 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/3fa89a8b144c4bce83eaf5dd159a4f88 2024-11-20T15:23:33,124 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/43422248e70e4f9b9768cf3390150773 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/43422248e70e4f9b9768cf3390150773 2024-11-20T15:23:33,124 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/dcf43b256cd5417db04512cabe2c8d22 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/dcf43b256cd5417db04512cabe2c8d22 2024-11-20T15:23:33,125 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/b926ded073b3496eb61538118234f520 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/b926ded073b3496eb61538118234f520 2024-11-20T15:23:33,126 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/e82bab4b828e4d5c85f56bcddc3ea242 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/e82bab4b828e4d5c85f56bcddc3ea242 2024-11-20T15:23:33,127 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0bf4d90058d74d90bd33a982899657ae to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/0bf4d90058d74d90bd33a982899657ae 2024-11-20T15:23:33,128 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/ca009649917243af9ad9c521c0a374d2 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/ca009649917243af9ad9c521c0a374d2 2024-11-20T15:23:33,129 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/c0fcb871bcbe4cd5a812ce6f69a78a08 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/c0fcb871bcbe4cd5a812ce6f69a78a08 2024-11-20T15:23:33,130 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/24093ff731ea42d1a059b4d4fb5c731b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/24093ff731ea42d1a059b4d4fb5c731b 2024-11-20T15:23:33,131 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/d5b016dd176b492eb59942b18c3fc86e to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/d5b016dd176b492eb59942b18c3fc86e 2024-11-20T15:23:33,132 DEBUG [StoreCloser-TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/43d2e89ff43d44d8a39c1a388e46d2fc to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/43d2e89ff43d44d8a39c1a388e46d2fc 2024-11-20T15:23:33,136 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/recovered.edits/547.seqid, newMaxSeqId=547, maxSeqId=1 2024-11-20T15:23:33,136 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9. 2024-11-20T15:23:33,137 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1635): Region close journal for 558f4ab8fec4b16865ece4459134acf9: 2024-11-20T15:23:33,138 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(170): Closed 558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:33,138 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=162 updating hbase:meta row=558f4ab8fec4b16865ece4459134acf9, regionState=CLOSED 2024-11-20T15:23:33,141 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=163, resume processing ppid=162 2024-11-20T15:23:33,141 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; CloseRegionProcedure 558f4ab8fec4b16865ece4459134acf9, server=0b62285ead89,33387,1732116069954 in 705 msec 2024-11-20T15:23:33,142 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-20T15:23:33,142 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=558f4ab8fec4b16865ece4459134acf9, UNASSIGN in 709 msec 2024-11-20T15:23:33,144 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-11-20T15:23:33,144 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 711 msec 2024-11-20T15:23:33,145 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116213145"}]},"ts":"1732116213145"} 2024-11-20T15:23:33,146 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T15:23:33,148 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T15:23:33,149 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 726 msec 2024-11-20T15:23:33,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-11-20T15:23:33,529 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table 
Name: default:TestAcidGuarantees, procId: 160 completed 2024-11-20T15:23:33,530 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T15:23:33,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=164, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:23:33,531 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=164, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:23:33,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=164 2024-11-20T15:23:33,532 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=164, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:23:33,534 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:33,537 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/recovered.edits] 2024-11-20T15:23:33,539 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/606f45627aae40769c3c9fb9bcbbc664 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/606f45627aae40769c3c9fb9bcbbc664 2024-11-20T15:23:33,541 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/77e05aa1275d4cd19c583106fe79adcf to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/77e05aa1275d4cd19c583106fe79adcf 2024-11-20T15:23:33,542 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9f7f3ebea6b24d2388f71e8d0b763da8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/A/9f7f3ebea6b24d2388f71e8d0b763da8 2024-11-20T15:23:33,545 DEBUG 
[HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/6c75f10872204802920ff789775c8a6a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/6c75f10872204802920ff789775c8a6a 2024-11-20T15:23:33,546 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/a44efe24f37845748ba79a94bf1cab43 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/a44efe24f37845748ba79a94bf1cab43 2024-11-20T15:23:33,547 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/d65625f4aa744e8cbb70cb1c076f5161 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/B/d65625f4aa744e8cbb70cb1c076f5161 2024-11-20T15:23:33,549 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/4162c402229b462ca1b775b4a38960b6 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/4162c402229b462ca1b775b4a38960b6 2024-11-20T15:23:33,550 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/5ae105b133364c5c9599dbd30338263c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/5ae105b133364c5c9599dbd30338263c 2024-11-20T15:23:33,552 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/e6e80886ff2f4ecab8635b6d373d7f9b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/C/e6e80886ff2f4ecab8635b6d373d7f9b 2024-11-20T15:23:33,562 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/recovered.edits/547.seqid to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9/recovered.edits/547.seqid 2024-11-20T15:23:33,562 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/558f4ab8fec4b16865ece4459134acf9 2024-11-20T15:23:33,562 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T15:23:33,564 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=164, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:23:33,566 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T15:23:33,568 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T15:23:33,569 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=164, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:23:33,569 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T15:23:33,569 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732116213569"}]},"ts":"9223372036854775807"} 2024-11-20T15:23:33,571 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T15:23:33,571 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 558f4ab8fec4b16865ece4459134acf9, NAME => 'TestAcidGuarantees,,1732116184601.558f4ab8fec4b16865ece4459134acf9.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T15:23:33,571 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T15:23:33,571 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732116213571"}]},"ts":"9223372036854775807"} 2024-11-20T15:23:33,572 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T15:23:33,575 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=164, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:23:33,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 45 msec 2024-11-20T15:23:33,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=164 2024-11-20T15:23:33,633 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 164 completed 2024-11-20T15:23:33,644 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=238 (was 241), OpenFileDescriptor=453 (was 461), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=690 (was 683) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=5518 (was 5595) 2024-11-20T15:23:33,656 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=238, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=690, ProcessCount=11, AvailableMemoryMB=5518 2024-11-20T15:23:33,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T15:23:33,657 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T15:23:33,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T15:23:33,659 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T15:23:33,659 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:33,659 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 165 2024-11-20T15:23:33,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T15:23:33,660 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T15:23:33,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742449_1625 (size=960) 2024-11-20T15:23:33,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T15:23:33,961 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T15:23:34,071 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3 2024-11-20T15:23:34,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742450_1626 (size=53) 2024-11-20T15:23:34,077 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:23:34,077 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing b9a29e8e077d15da0686c3415c44e050, disabling compactions & flushes 2024-11-20T15:23:34,077 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:34,077 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:34,077 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. after waiting 0 ms 2024-11-20T15:23:34,077 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:34,077 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
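[Note] The DISABLE (pid=160), DELETE (pid=164) and CREATE (pid=165) procedures logged around this point correspond to what the test driver issues through the client Admin API between test methods. Below is a minimal client-side sketch of that sequence, assuming an already-open Connection named conn and keeping only the descriptor attributes visible in the logged 'TestAcidGuarantees' descriptor (families A/B/C, one version each, BASIC in-memory compaction); the helper name is hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RecreateAcidTableSketch {
      // Hypothetical helper mirroring the DISABLE -> DELETE -> CREATE sequence in the log.
      public static void recreate(Connection conn) throws IOException {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Admin admin = conn.getAdmin()) {
          if (admin.tableExists(tn)) {
            admin.disableTable(tn);          // DisableTableProcedure (pid=160 above)
            admin.deleteTable(tn);           // DeleteTableProcedure (pid=164 above)
          }
          TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tn)
              .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
          for (String cf : new String[] {"A", "B", "C"}) {
            tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf))
                .setMaxVersions(1)           // VERSIONS => '1' as in the logged descriptor
                .build());
          }
          admin.createTable(tdb.build());    // CreateTableProcedure (pid=165 above/below)
        }
      }
    }
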
2024-11-20T15:23:34,077 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:34,078 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T15:23:34,079 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732116214078"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732116214078"}]},"ts":"1732116214078"} 2024-11-20T15:23:34,080 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T15:23:34,080 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T15:23:34,080 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116214080"}]},"ts":"1732116214080"} 2024-11-20T15:23:34,081 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T15:23:34,086 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b9a29e8e077d15da0686c3415c44e050, ASSIGN}] 2024-11-20T15:23:34,087 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=166, ppid=165, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b9a29e8e077d15da0686c3415c44e050, ASSIGN 2024-11-20T15:23:34,087 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=166, ppid=165, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=b9a29e8e077d15da0686c3415c44e050, ASSIGN; state=OFFLINE, location=0b62285ead89,33387,1732116069954; forceNewPlan=false, retain=false 2024-11-20T15:23:34,238 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=166 updating hbase:meta row=b9a29e8e077d15da0686c3415c44e050, regionState=OPENING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:23:34,239 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=167, ppid=166, state=RUNNABLE; OpenRegionProcedure b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:23:34,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T15:23:34,390 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:34,394 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:34,394 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(7285): Opening region: {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} 2024-11-20T15:23:34,395 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:34,395 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:23:34,395 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(7327): checking encryption for b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:34,395 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(7330): checking classloading for b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:34,396 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:34,397 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:23:34,398 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b9a29e8e077d15da0686c3415c44e050 columnFamilyName A 2024-11-20T15:23:34,398 DEBUG [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:34,398 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.HStore(327): Store=b9a29e8e077d15da0686c3415c44e050/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:23:34,398 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:34,400 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:23:34,400 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b9a29e8e077d15da0686c3415c44e050 columnFamilyName B 2024-11-20T15:23:34,400 DEBUG [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:34,400 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.HStore(327): Store=b9a29e8e077d15da0686c3415c44e050/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:23:34,400 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:34,401 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:23:34,401 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b9a29e8e077d15da0686c3415c44e050 columnFamilyName C 2024-11-20T15:23:34,401 DEBUG [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:34,402 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.HStore(327): Store=b9a29e8e077d15da0686c3415c44e050/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:23:34,402 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:34,403 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:34,403 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:34,405 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T15:23:34,406 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(1085): writing seq id for b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:34,408 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T15:23:34,408 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(1102): Opened b9a29e8e077d15da0686c3415c44e050; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75345890, jitterRate=0.12274125218391418}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T15:23:34,409 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(1001): Region open journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:34,410 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., pid=167, masterSystemTime=1732116214390 2024-11-20T15:23:34,411 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:34,411 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
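[Note] Once the region is open, the entries that follow show the test connecting a fresh client through ZooKeeper (the ReadOnlyZKClient line targeting 127.0.0.1:62338) and then modifying the table so that family A becomes a MOB family with MOB_THRESHOLD => '4' for testMobGetAtomicity. A hedged sketch of those two client-side steps is below; the quorum host/port are taken from the log, the use of modifyColumnFamily rather than a whole-table modify is an assumption, and the class name is made up.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobEnableSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // from the ReadOnlyZKClient line below
        conf.set("hbase.zookeeper.property.clientPort", "62338");  // session target 127.0.0.1:62338
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          // Switch family A to a MOB family with a tiny threshold, matching the modify below.
          admin.modifyColumnFamily(tn,
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                  .setMaxVersions(1)
                  .setMobEnabled(true)     // IS_MOB => 'true'
                  .setMobThreshold(4L)     // MOB_THRESHOLD => '4'
                  .build());
        }
      }
    }
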
2024-11-20T15:23:34,412 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=166 updating hbase:meta row=b9a29e8e077d15da0686c3415c44e050, regionState=OPEN, openSeqNum=2, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:23:34,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=167, resume processing ppid=166 2024-11-20T15:23:34,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, ppid=166, state=SUCCESS; OpenRegionProcedure b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 in 174 msec 2024-11-20T15:23:34,415 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-20T15:23:34,415 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b9a29e8e077d15da0686c3415c44e050, ASSIGN in 328 msec 2024-11-20T15:23:34,416 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T15:23:34,416 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116214416"}]},"ts":"1732116214416"} 2024-11-20T15:23:34,417 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T15:23:34,420 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T15:23:34,421 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 763 msec 2024-11-20T15:23:34,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T15:23:34,763 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-20T15:23:34,764 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x537a66f8 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2ac53e79 2024-11-20T15:23:34,771 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d5efb7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:34,773 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:34,774 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35510, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:34,775 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T15:23:34,776 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52108, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T15:23:34,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T15:23:34,777 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T15:23:34,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=168, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T15:23:34,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742451_1627 (size=996) 2024-11-20T15:23:35,188 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-20T15:23:35,188 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-20T15:23:35,190 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T15:23:35,192 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b9a29e8e077d15da0686c3415c44e050, REOPEN/MOVE}] 2024-11-20T15:23:35,193 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b9a29e8e077d15da0686c3415c44e050, REOPEN/MOVE 2024-11-20T15:23:35,193 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=170 updating hbase:meta row=b9a29e8e077d15da0686c3415c44e050, regionState=CLOSING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:23:35,194 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T15:23:35,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE; CloseRegionProcedure b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:23:35,346 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:35,346 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] handler.UnassignRegionHandler(124): Close b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:35,346 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T15:23:35,346 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1681): Closing b9a29e8e077d15da0686c3415c44e050, disabling compactions & flushes 2024-11-20T15:23:35,346 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:35,346 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:35,346 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. after waiting 0 ms 2024-11-20T15:23:35,346 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:35,350 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T15:23:35,351 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:35,351 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1635): Region close journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:35,351 WARN [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegionServer(3786): Not adding moved region record: b9a29e8e077d15da0686c3415c44e050 to self. 2024-11-20T15:23:35,352 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] handler.UnassignRegionHandler(170): Closed b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:35,353 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=170 updating hbase:meta row=b9a29e8e077d15da0686c3415c44e050, regionState=CLOSED 2024-11-20T15:23:35,355 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=171, resume processing ppid=170 2024-11-20T15:23:35,355 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; CloseRegionProcedure b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 in 160 msec 2024-11-20T15:23:35,355 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=b9a29e8e077d15da0686c3415c44e050, REOPEN/MOVE; state=CLOSED, location=0b62285ead89,33387,1732116069954; forceNewPlan=false, retain=true 2024-11-20T15:23:35,506 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=170 updating hbase:meta row=b9a29e8e077d15da0686c3415c44e050, regionState=OPENING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:23:35,507 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=170, state=RUNNABLE; OpenRegionProcedure b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:23:35,658 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:35,662 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:35,662 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7285): Opening region: {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} 2024-11-20T15:23:35,662 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:35,662 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T15:23:35,662 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7327): checking encryption for b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:35,662 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7330): checking classloading for b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:35,664 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:35,664 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:23:35,664 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b9a29e8e077d15da0686c3415c44e050 columnFamilyName A 2024-11-20T15:23:35,666 DEBUG [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:35,666 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.HStore(327): Store=b9a29e8e077d15da0686c3415c44e050/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:23:35,666 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:35,667 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:23:35,667 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b9a29e8e077d15da0686c3415c44e050 columnFamilyName B 2024-11-20T15:23:35,667 DEBUG [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:35,668 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.HStore(327): Store=b9a29e8e077d15da0686c3415c44e050/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:23:35,668 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:35,668 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T15:23:35,669 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b9a29e8e077d15da0686c3415c44e050 columnFamilyName C 2024-11-20T15:23:35,669 DEBUG [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:35,669 INFO [StoreOpener-b9a29e8e077d15da0686c3415c44e050-1 {}] regionserver.HStore(327): Store=b9a29e8e077d15da0686c3415c44e050/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T15:23:35,669 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:35,670 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:35,671 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:35,672 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T15:23:35,675 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1085): writing seq id for b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:35,676 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1102): Opened b9a29e8e077d15da0686c3415c44e050; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67632183, jitterRate=0.007798060774803162}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T15:23:35,676 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1001): Region open journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:35,677 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., pid=172, masterSystemTime=1732116215658 2024-11-20T15:23:35,679 DEBUG [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:35,679 INFO [RS_OPEN_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
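
At this point the region has been reopened with the modified descriptor. The modify-table request logged at 15:23:34,777 switches family A to IS_MOB => 'true' with MOB_THRESHOLD => '4' while leaving B and C unchanged. As a hedged sketch only (stock HBase 2.x Admin API; the class name, connection setup and variable names are illustrative, not the test's actual code), an equivalent client-side change looks roughly like this; the ModifyTableProcedure, tableinfo rewrite and REOPEN/MOVE entries above are the server side of such a call:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyASketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      TableDescriptor current = admin.getDescriptor(table);
      // Rebuild family 'A' with MOB enabled and a 4-byte threshold,
      // mirroring the descriptor change recorded in the log.
      ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
          .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
          .setMobEnabled(true)
          .setMobThreshold(4L)
          .build();
      TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(mobA)
          .build();
      // Runs as a ModifyTableProcedure and reopens the table's regions,
      // as in pids 168-172 above.
      admin.modifyTable(updated);
    }
  }
}
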
2024-11-20T15:23:35,679 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=170 updating hbase:meta row=b9a29e8e077d15da0686c3415c44e050, regionState=OPEN, openSeqNum=5, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:23:35,681 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=170 2024-11-20T15:23:35,681 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=170, state=SUCCESS; OpenRegionProcedure b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 in 173 msec 2024-11-20T15:23:35,683 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-20T15:23:35,683 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b9a29e8e077d15da0686c3415c44e050, REOPEN/MOVE in 489 msec 2024-11-20T15:23:35,684 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=168 2024-11-20T15:23:35,684 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=168, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 493 msec 2024-11-20T15:23:35,686 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 907 msec 2024-11-20T15:23:35,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=168 2024-11-20T15:23:35,688 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06094c70 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bc9c3e 2024-11-20T15:23:35,691 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fc332d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:35,692 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x103dfc6e to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7181df3b 2024-11-20T15:23:35,695 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17327621, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:35,696 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e047c09 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11030ef5 2024-11-20T15:23:35,700 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1584f18a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:35,701 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60d631a3 
to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@69abefea 2024-11-20T15:23:35,712 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b914bf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:35,713 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58971172 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e757135 2024-11-20T15:23:35,724 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f6a59e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:35,725 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7846cb78 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@150e08ed 2024-11-20T15:23:35,728 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53305d9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:35,729 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f1754bc to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a3b66d3 2024-11-20T15:23:35,732 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bb6288a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:35,733 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d9113f3 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5cfdf76c 2024-11-20T15:23:35,744 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6556601, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:35,745 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bb75907 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68c2838a 2024-11-20T15:23:35,752 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@458a85fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:35,753 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c1d3a95 to 127.0.0.1:62338 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@50bf224f 2024-11-20T15:23:35,768 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@410bf0c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T15:23:35,780 DEBUG [hconnection-0x71c1c634-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:35,781 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35518, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:35,788 DEBUG [hconnection-0x731016eb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:35,789 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35526, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:35,798 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:35,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-11-20T15:23:35,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T15:23:35,799 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:35,800 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:35,800 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:35,808 DEBUG [hconnection-0x1295aeec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:35,809 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35538, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:35,829 DEBUG [hconnection-0x2692ad3b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:35,831 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35548, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:35,840 DEBUG [hconnection-0x5fbebee6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using 
SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:35,841 DEBUG [hconnection-0x69c10bd8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:35,841 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35554, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:35,842 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35566, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:35,847 DEBUG [hconnection-0x7306aa7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:35,848 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35570, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:35,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:35,854 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:23:35,854 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:35,854 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:35,854 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:35,854 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:35,854 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:35,854 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:35,879 DEBUG [hconnection-0x14f3e774-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:35,881 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35576, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:35,885 DEBUG [hconnection-0x6ebb41b6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:35,887 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35590, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:35,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:35,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116275893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:35,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:35,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116275897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:35,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
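
The RegionTooBusyException above comes from HRegion.checkResources rejecting a put because the region's memstore is over its blocking limit while the requested flush is still running. The 512.0 K limit is consistent with the deliberately tiny MEMSTORE_FLUSHSIZE of 131072 bytes that TableDescriptorChecker warned about at 15:23:34,777, assuming the default hbase.hregion.memstore.block.multiplier of 4 (128 K x 4 = 512 K). The "flush TestAcidGuarantees" request handled by the master at 15:23:35,798 is the kind of call Admin#flush issues; a minimal sketch, assuming the stock HBase 2.x client API (connection setup and class name are illustrative):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; in this build the
      // master runs it as a FlushTableProcedure with FlushRegionProcedure
      // children (pids 173/174 in this log).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
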
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:35,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116275898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:35,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:35,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116275898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:35,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T15:23:35,920 DEBUG [hconnection-0x1080b2ea-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T15:23:35,921 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35594, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T15:23:35,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:35,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116275928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:35,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120938caf3a8af24e9280f14f82cb8ec9e3_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116215852/Put/seqid=0 2024-11-20T15:23:35,953 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:35,954 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T15:23:35,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:35,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:35,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:35,954 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:35,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:35,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:35,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742452_1628 (size=12154) 2024-11-20T15:23:35,972 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:35,978 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120938caf3a8af24e9280f14f82cb8ec9e3_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120938caf3a8af24e9280f14f82cb8ec9e3_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:35,979 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/7a57795357a549d9a2fd0b799d9f63a3, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:35,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/7a57795357a549d9a2fd0b799d9f63a3 is 175, key is test_row_0/A:col10/1732116215852/Put/seqid=0 2024-11-20T15:23:35,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742453_1629 (size=30955) 2024-11-20T15:23:35,987 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/7a57795357a549d9a2fd0b799d9f63a3 2024-11-20T15:23:36,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116275999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116276001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
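
The flush entries above also show the MOB write path: with MOB_THRESHOLD => '4' on family A, flushed cells whose values exceed 4 bytes are written to a MOB file under mobdir (the d41d8cd9...-prefixed file renamed at 15:23:35,978), while the ordinary store file under .tmp/A keeps reference cells pointing at it. No special client API is involved; any Put whose value is larger than the threshold takes this path on flush. A hypothetical example (row, family and qualifier taken from the log; the value itself is made up):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MobSizedPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // 32 bytes, well above the 4-byte MOB threshold, so on flush this value
      // ends up in a MOB file rather than inline in the A store file.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"),
          Bytes.toBytes("0123456789abcdef0123456789abcdef"));
      table.put(put);
    }
  }
}
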
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116276001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116276001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116276031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,039 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/6700ca8a7ae644b9a5dd8d2782484ab7 is 50, key is test_row_0/B:col10/1732116215852/Put/seqid=0 2024-11-20T15:23:36,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742454_1630 (size=12001) 2024-11-20T15:23:36,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/6700ca8a7ae644b9a5dd8d2782484ab7 2024-11-20T15:23:36,095 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/cf2f7dd159c1466bac96005fe3cf8406 is 50, key is test_row_0/C:col10/1732116215852/Put/seqid=0 2024-11-20T15:23:36,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T15:23:36,107 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T15:23:36,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:36,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:36,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:36,108 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:36,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:36,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:36,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742455_1631 (size=12001) 2024-11-20T15:23:36,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/cf2f7dd159c1466bac96005fe3cf8406 2024-11-20T15:23:36,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/7a57795357a549d9a2fd0b799d9f63a3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/7a57795357a549d9a2fd0b799d9f63a3 2024-11-20T15:23:36,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/7a57795357a549d9a2fd0b799d9f63a3, entries=150, sequenceid=15, filesize=30.2 K 2024-11-20T15:23:36,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/6700ca8a7ae644b9a5dd8d2782484ab7 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/6700ca8a7ae644b9a5dd8d2782484ab7 2024-11-20T15:23:36,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/6700ca8a7ae644b9a5dd8d2782484ab7, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T15:23:36,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/cf2f7dd159c1466bac96005fe3cf8406 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/cf2f7dd159c1466bac96005fe3cf8406 2024-11-20T15:23:36,154 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/cf2f7dd159c1466bac96005fe3cf8406, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T15:23:36,155 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for b9a29e8e077d15da0686c3415c44e050 in 300ms, sequenceid=15, compaction requested=false 2024-11-20T15:23:36,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:36,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:36,204 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T15:23:36,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:36,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:36,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:36,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:36,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:36,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:36,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116276218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116276219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116276219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,224 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116276221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120807785b1ac2d48eeb04d19f17ca95a85_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116215892/Put/seqid=0 2024-11-20T15:23:36,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116276234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742456_1632 (size=12154) 2024-11-20T15:23:36,255 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:36,258 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120807785b1ac2d48eeb04d19f17ca95a85_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120807785b1ac2d48eeb04d19f17ca95a85_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:36,259 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/dd117c7700024c0ea9aa3b0fcd391bf5, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:36,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/dd117c7700024c0ea9aa3b0fcd391bf5 is 175, key is test_row_0/A:col10/1732116215892/Put/seqid=0 2024-11-20T15:23:36,260 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T15:23:36,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:36,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
as already flushing 2024-11-20T15:23:36,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:36,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:36,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:36,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:36,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742457_1633 (size=30955) 2024-11-20T15:23:36,285 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/dd117c7700024c0ea9aa3b0fcd391bf5 2024-11-20T15:23:36,302 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/ab9495825b044a6a93c12b4cf7dc022a is 50, key is test_row_0/B:col10/1732116215892/Put/seqid=0 2024-11-20T15:23:36,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116276323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116276324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116276325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742458_1634 (size=12001) 2024-11-20T15:23:36,333 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/ab9495825b044a6a93c12b4cf7dc022a 2024-11-20T15:23:36,353 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/1c70eed8fdf54539bda93d0d153595e0 is 50, key is test_row_0/C:col10/1732116215892/Put/seqid=0 2024-11-20T15:23:36,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742459_1635 (size=12001) 2024-11-20T15:23:36,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/1c70eed8fdf54539bda93d0d153595e0 2024-11-20T15:23:36,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/dd117c7700024c0ea9aa3b0fcd391bf5 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/dd117c7700024c0ea9aa3b0fcd391bf5 2024-11-20T15:23:36,391 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/dd117c7700024c0ea9aa3b0fcd391bf5, entries=150, 
sequenceid=42, filesize=30.2 K 2024-11-20T15:23:36,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/ab9495825b044a6a93c12b4cf7dc022a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ab9495825b044a6a93c12b4cf7dc022a 2024-11-20T15:23:36,399 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ab9495825b044a6a93c12b4cf7dc022a, entries=150, sequenceid=42, filesize=11.7 K 2024-11-20T15:23:36,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/1c70eed8fdf54539bda93d0d153595e0 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/1c70eed8fdf54539bda93d0d153595e0 2024-11-20T15:23:36,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T15:23:36,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/1c70eed8fdf54539bda93d0d153595e0, entries=150, sequenceid=42, filesize=11.7 K 2024-11-20T15:23:36,406 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=53.67 KB/54960 for b9a29e8e077d15da0686c3415c44e050 in 202ms, sequenceid=42, compaction requested=false 2024-11-20T15:23:36,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:36,415 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,415 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T15:23:36,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:36,415 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:23:36,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:36,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:36,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:36,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:36,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:36,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:36,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209df9ede376d14fb0ad2eb89de2372b63_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116216212/Put/seqid=0 2024-11-20T15:23:36,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742460_1636 (size=12154) 2024-11-20T15:23:36,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:36,467 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209df9ede376d14fb0ad2eb89de2372b63_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209df9ede376d14fb0ad2eb89de2372b63_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:36,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/774715f3adcd449c974531bd9d63dce4, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:36,469 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/774715f3adcd449c974531bd9d63dce4 is 175, key is test_row_0/A:col10/1732116216212/Put/seqid=0 2024-11-20T15:23:36,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742461_1637 (size=30955) 2024-11-20T15:23:36,491 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/774715f3adcd449c974531bd9d63dce4 2024-11-20T15:23:36,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/08ca299d87934a51be919577123e80b0 is 50, key is test_row_0/B:col10/1732116216212/Put/seqid=0 2024-11-20T15:23:36,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:36,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:36,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742462_1638 (size=12001) 2024-11-20T15:23:36,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116276559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116276560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116276563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116276564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116276564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116276665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116276668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116276669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116276670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116276670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116276869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116276873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116276874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116276874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,877 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:36,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116276875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:36,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T15:23:36,934 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/08ca299d87934a51be919577123e80b0 2024-11-20T15:23:36,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/d232be41644240c4800a3bfea812ffe5 is 50, key is test_row_0/C:col10/1732116216212/Put/seqid=0 2024-11-20T15:23:36,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742463_1639 (size=12001) 2024-11-20T15:23:36,960 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/d232be41644240c4800a3bfea812ffe5 2024-11-20T15:23:36,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/774715f3adcd449c974531bd9d63dce4 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/774715f3adcd449c974531bd9d63dce4 2024-11-20T15:23:36,970 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/774715f3adcd449c974531bd9d63dce4, entries=150, sequenceid=53, filesize=30.2 K 2024-11-20T15:23:36,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/08ca299d87934a51be919577123e80b0 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/08ca299d87934a51be919577123e80b0 2024-11-20T15:23:36,977 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/08ca299d87934a51be919577123e80b0, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T15:23:36,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/d232be41644240c4800a3bfea812ffe5 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d232be41644240c4800a3bfea812ffe5 2024-11-20T15:23:36,984 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d232be41644240c4800a3bfea812ffe5, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T15:23:36,985 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for b9a29e8e077d15da0686c3415c44e050 in 570ms, sequenceid=53, compaction requested=true 2024-11-20T15:23:36,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:36,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
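Editor's note: the long run of RegionTooBusyException "Over memstore limit=512.0 K" warnings above is HRegion.checkResources() rejecting mutations while the region's memstore sits above its blocking threshold; the flush that completes just above ("Finished flush of dataSize ~53.67 KB ... in 570ms, sequenceid=53") is what drains it again. The threshold is the per-region flush size multiplied by the block multiplier. The following is a minimal sketch of how a 512 K limit can arise from those two standard settings; the exact values this test run uses are not visible in the log, so the numbers below are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Per-region memstore flush size. The shipped default is 128 MB; 128 K here is an
    // assumed test-only value chosen so the arithmetic matches the 512.0 K limit in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

    // Writes are rejected with RegionTooBusyException once the memstore reaches
    // flushSize * blockMultiplier; 4 is the shipped default multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288 bytes = 512.0 K
  }
}

Each rejected call appears in the log as one WARN with the checkResources stack trace plus one DEBUG ipc.CallRunner line carrying the callId and deadline, which is why the pattern repeats until the memstore drops back under the limit.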
2024-11-20T15:23:36,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-11-20T15:23:36,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-11-20T15:23:36,988 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-20T15:23:36,988 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1860 sec 2024-11-20T15:23:36,990 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 1.1900 sec 2024-11-20T15:23:37,185 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T15:23:37,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:37,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:37,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:37,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:37,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:37,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:37,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:37,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116277190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116277190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116277195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116277196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116277197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120414673be641a4b16aac88c04a35ec210_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116217184/Put/seqid=0 2024-11-20T15:23:37,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742464_1640 (size=12154) 2024-11-20T15:23:37,277 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,281 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120414673be641a4b16aac88c04a35ec210_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120414673be641a4b16aac88c04a35ec210_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:37,282 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/a21ad136fe9a478ebe937e1a599d8255, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:37,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/a21ad136fe9a478ebe937e1a599d8255 is 175, key is test_row_0/A:col10/1732116217184/Put/seqid=0 2024-11-20T15:23:37,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116277296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116277297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116277300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,303 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116277301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116277301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742465_1641 (size=30955) 2024-11-20T15:23:37,319 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=82, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/a21ad136fe9a478ebe937e1a599d8255 2024-11-20T15:23:37,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/0bf42752e8e24994b45eaef4a8acd251 is 50, key is test_row_0/B:col10/1732116217184/Put/seqid=0 2024-11-20T15:23:37,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742466_1642 (size=12001) 2024-11-20T15:23:37,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/0bf42752e8e24994b45eaef4a8acd251 2024-11-20T15:23:37,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/d9ec84991f214d06aa891a2b014968c7 is 50, key is test_row_0/C:col10/1732116217184/Put/seqid=0 2024-11-20T15:23:37,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742467_1643 (size=12001) 2024-11-20T15:23:37,397 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/d9ec84991f214d06aa891a2b014968c7 2024-11-20T15:23:37,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/a21ad136fe9a478ebe937e1a599d8255 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/a21ad136fe9a478ebe937e1a599d8255 2024-11-20T15:23:37,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/a21ad136fe9a478ebe937e1a599d8255, entries=150, sequenceid=82, filesize=30.2 K 2024-11-20T15:23:37,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/0bf42752e8e24994b45eaef4a8acd251 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/0bf42752e8e24994b45eaef4a8acd251 2024-11-20T15:23:37,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,411 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/0bf42752e8e24994b45eaef4a8acd251, entries=150, sequenceid=82, filesize=11.7 K 2024-11-20T15:23:37,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/d9ec84991f214d06aa891a2b014968c7 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d9ec84991f214d06aa891a2b014968c7 2024-11-20T15:23:37,417 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d9ec84991f214d06aa891a2b014968c7, entries=150, sequenceid=82, filesize=11.7 K 2024-11-20T15:23:37,417 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for b9a29e8e077d15da0686c3415c44e050 in 232ms, sequenceid=82, compaction requested=true 2024-11-20T15:23:37,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:37,418 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:23:37,419 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): 
Exploring compaction algorithm has selected 4 files of size 123820 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:23:37,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:37,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:37,420 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/A is initiating minor compaction (all files) 2024-11-20T15:23:37,420 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:23:37,420 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/A in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:37,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:37,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:37,420 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/7a57795357a549d9a2fd0b799d9f63a3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/dd117c7700024c0ea9aa3b0fcd391bf5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/774715f3adcd449c974531bd9d63dce4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/a21ad136fe9a478ebe937e1a599d8255] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=120.9 K 2024-11-20T15:23:37,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:37,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:37,420 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
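Editor's note: the SortedCompactionPolicy(75) lines above ("Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking") are governed by per-store file-count thresholds: a minor compaction needs at least hbase.hstore.compaction.min eligible files, takes at most hbase.hstore.compaction.max, and the "16 blocking" figure is hbase.hstore.blockingStoreFiles. The sketch below only names those settings with their shipped defaults; whether this test overrides any of them is not shown in the log, so treat the values as assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Fewest eligible store files before a minor compaction is considered (default 3).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Most files a single minor compaction will pick up (default 10).
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Ratio used when deciding whether a candidate file set is "in ratio" (default 1.2).
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Store-file count above which the store is considered blocked; this is the
    // "16 blocking" value reported by the selection log lines above.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
  }
}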
2024-11-20T15:23:37,420 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/7a57795357a549d9a2fd0b799d9f63a3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/dd117c7700024c0ea9aa3b0fcd391bf5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/774715f3adcd449c974531bd9d63dce4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/a21ad136fe9a478ebe937e1a599d8255] 2024-11-20T15:23:37,421 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a57795357a549d9a2fd0b799d9f63a3, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732116215836 2024-11-20T15:23:37,422 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd117c7700024c0ea9aa3b0fcd391bf5, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732116215892 2024-11-20T15:23:37,422 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:23:37,422 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/B is initiating minor compaction (all files) 2024-11-20T15:23:37,422 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/B in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:37,422 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/6700ca8a7ae644b9a5dd8d2782484ab7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ab9495825b044a6a93c12b4cf7dc022a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/08ca299d87934a51be919577123e80b0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/0bf42752e8e24994b45eaef4a8acd251] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=46.9 K 2024-11-20T15:23:37,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,422 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 774715f3adcd449c974531bd9d63dce4, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732116216212 2024-11-20T15:23:37,422 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6700ca8a7ae644b9a5dd8d2782484ab7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732116215836 2024-11-20T15:23:37,423 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting ab9495825b044a6a93c12b4cf7dc022a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732116215892 2024-11-20T15:23:37,423 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting a21ad136fe9a478ebe937e1a599d8255, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732116217183 2024-11-20T15:23:37,423 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 08ca299d87934a51be919577123e80b0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732116216212 2024-11-20T15:23:37,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,432 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 0bf42752e8e24994b45eaef4a8acd251, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732116217183 2024-11-20T15:23:37,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,441 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized 
enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:37,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,451 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#B#compaction#559 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:37,452 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/1499798425a344f491d2463dd3be8d87 is 50, key is test_row_0/B:col10/1732116217184/Put/seqid=0 2024-11-20T15:23:37,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,460 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120ebb8a5ea233a46f8b75fb81c3d64f9be_b9a29e8e077d15da0686c3415c44e050 store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:37,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,463 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120ebb8a5ea233a46f8b75fb81c3d64f9be_b9a29e8e077d15da0686c3415c44e050, store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:37,463 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ebb8a5ea233a46f8b75fb81c3d64f9be_b9a29e8e077d15da0686c3415c44e050 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:37,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,466 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,472 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,477 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,487 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,496 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,502 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,507 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742468_1644 (size=12139) 2024-11-20T15:23:37,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] 
regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:37,520 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:23:37,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:37,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:37,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:37,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:37,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:37,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:37,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742469_1645 (size=4469) 2024-11-20T15:23:37,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,532 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#A#compaction#558 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:37,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,533 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/08135c9f4f5045eb808024472faab16e is 175, key is test_row_0/A:col10/1732116217184/Put/seqid=0 2024-11-20T15:23:37,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,550 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112020ae1e55ba144cde8c9354ae47254079_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116217513/Put/seqid=0 2024-11-20T15:23:37,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,559 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116277577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,587 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116277579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742470_1646 (size=31093) 2024-11-20T15:23:37,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116277580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,591 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116277581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116277582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,596 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/08135c9f4f5045eb808024472faab16e as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/08135c9f4f5045eb808024472faab16e 2024-11-20T15:23:37,603 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/A of b9a29e8e077d15da0686c3415c44e050 into 08135c9f4f5045eb808024472faab16e(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:37,603 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:37,603 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/A, priority=12, startTime=1732116217418; duration=0sec 2024-11-20T15:23:37,603 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:37,603 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:A 2024-11-20T15:23:37,603 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:23:37,605 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:23:37,605 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/C is initiating minor compaction (all files) 2024-11-20T15:23:37,605 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/C in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:37,605 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/cf2f7dd159c1466bac96005fe3cf8406, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/1c70eed8fdf54539bda93d0d153595e0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d232be41644240c4800a3bfea812ffe5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d9ec84991f214d06aa891a2b014968c7] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=46.9 K 2024-11-20T15:23:37,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742471_1647 (size=19474) 2024-11-20T15:23:37,612 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf2f7dd159c1466bac96005fe3cf8406, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732116215836 2024-11-20T15:23:37,612 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,612 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 1c70eed8fdf54539bda93d0d153595e0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732116215892 2024-11-20T15:23:37,613 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d232be41644240c4800a3bfea812ffe5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732116216212 2024-11-20T15:23:37,614 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9ec84991f214d06aa891a2b014968c7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732116217183 2024-11-20T15:23:37,617 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112020ae1e55ba144cde8c9354ae47254079_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112020ae1e55ba144cde8c9354ae47254079_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:37,618 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/47f2ad5ae41d4ad886fb113ad415bfd8, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:37,619 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/47f2ad5ae41d4ad886fb113ad415bfd8 is 175, key is test_row_0/A:col10/1732116217513/Put/seqid=0 2024-11-20T15:23:37,635 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#C#compaction#561 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:37,635 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/dda5acad070e4cc78b08196a67a0f39c is 50, key is test_row_0/C:col10/1732116217184/Put/seqid=0 2024-11-20T15:23:37,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742472_1648 (size=56733) 2024-11-20T15:23:37,652 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/47f2ad5ae41d4ad886fb113ad415bfd8 2024-11-20T15:23:37,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742473_1649 (size=12139) 2024-11-20T15:23:37,672 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/dda5acad070e4cc78b08196a67a0f39c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/dda5acad070e4cc78b08196a67a0f39c 2024-11-20T15:23:37,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/9a97685e32c741efbcb963ce6da24567 is 50, key is test_row_0/B:col10/1732116217513/Put/seqid=0 2024-11-20T15:23:37,676 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/C of b9a29e8e077d15da0686c3415c44e050 into dda5acad070e4cc78b08196a67a0f39c(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:37,676 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:37,676 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/C, priority=12, startTime=1732116217420; duration=0sec 2024-11-20T15:23:37,677 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:37,677 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:C 2024-11-20T15:23:37,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742474_1650 (size=12001) 2024-11-20T15:23:37,681 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/9a97685e32c741efbcb963ce6da24567 2024-11-20T15:23:37,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116277685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,688 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/769a8c312f814f13af2ef15d849f5b96 is 50, key is test_row_0/C:col10/1732116217513/Put/seqid=0 2024-11-20T15:23:37,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116277689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116277693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116277693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116277694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742475_1651 (size=12001) 2024-11-20T15:23:37,710 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/769a8c312f814f13af2ef15d849f5b96 2024-11-20T15:23:37,716 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/47f2ad5ae41d4ad886fb113ad415bfd8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/47f2ad5ae41d4ad886fb113ad415bfd8 2024-11-20T15:23:37,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/47f2ad5ae41d4ad886fb113ad415bfd8, entries=300, sequenceid=94, filesize=55.4 K 2024-11-20T15:23:37,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/9a97685e32c741efbcb963ce6da24567 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/9a97685e32c741efbcb963ce6da24567 2024-11-20T15:23:37,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/9a97685e32c741efbcb963ce6da24567, entries=150, sequenceid=94, filesize=11.7 K 2024-11-20T15:23:37,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/769a8c312f814f13af2ef15d849f5b96 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/769a8c312f814f13af2ef15d849f5b96 2024-11-20T15:23:37,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/769a8c312f814f13af2ef15d849f5b96, entries=150, sequenceid=94, filesize=11.7 K 2024-11-20T15:23:37,736 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b9a29e8e077d15da0686c3415c44e050 in 216ms, sequenceid=94, compaction requested=false 2024-11-20T15:23:37,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:37,892 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T15:23:37,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:37,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:37,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:37,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:37,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:37,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:37,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:37,902 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120846f5b1ce8614af7bc6e81e7c736a982_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116217891/Put/seqid=0 2024-11-20T15:23:37,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T15:23:37,905 INFO [Thread-2754 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-20T15:23:37,906 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:37,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-11-20T15:23:37,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T15:23:37,907 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): 
pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:37,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116277902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,909 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:37,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116277903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,909 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:37,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116277904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116277906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:37,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116277908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:37,917 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/1499798425a344f491d2463dd3be8d87 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/1499798425a344f491d2463dd3be8d87 2024-11-20T15:23:37,922 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/B of b9a29e8e077d15da0686c3415c44e050 into 1499798425a344f491d2463dd3be8d87(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:37,922 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:37,922 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/B, priority=12, startTime=1732116217420; duration=0sec 2024-11-20T15:23:37,923 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:37,923 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:B 2024-11-20T15:23:37,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742476_1652 (size=14594) 2024-11-20T15:23:37,942 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:37,948 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120846f5b1ce8614af7bc6e81e7c736a982_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120846f5b1ce8614af7bc6e81e7c736a982_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:37,949 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/8e0aa2d88527499fa3a07443352c44fa, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:37,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/8e0aa2d88527499fa3a07443352c44fa is 175, key is test_row_0/A:col10/1732116217891/Put/seqid=0 2024-11-20T15:23:37,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742477_1653 (size=39549) 2024-11-20T15:23:37,976 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=121, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/8e0aa2d88527499fa3a07443352c44fa 2024-11-20T15:23:37,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/4d019217366b47d09037bb62dc28cea8 is 50, key is test_row_0/B:col10/1732116217891/Put/seqid=0 2024-11-20T15:23:38,008 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T15:23:38,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116278010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742478_1654 (size=12001) 2024-11-20T15:23:38,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116278010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116278010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,014 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/4d019217366b47d09037bb62dc28cea8 2024-11-20T15:23:38,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116278012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116278014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/5a28ccb1e64e4cb386df1c37b73c63f2 is 50, key is test_row_0/C:col10/1732116217891/Put/seqid=0 2024-11-20T15:23:38,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742479_1655 (size=12001) 2024-11-20T15:23:38,056 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/5a28ccb1e64e4cb386df1c37b73c63f2 2024-11-20T15:23:38,061 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-20T15:23:38,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:38,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:38,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:38,061 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:38,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:38,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:38,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/8e0aa2d88527499fa3a07443352c44fa as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/8e0aa2d88527499fa3a07443352c44fa 2024-11-20T15:23:38,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/8e0aa2d88527499fa3a07443352c44fa, entries=200, sequenceid=121, filesize=38.6 K 2024-11-20T15:23:38,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/4d019217366b47d09037bb62dc28cea8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/4d019217366b47d09037bb62dc28cea8 2024-11-20T15:23:38,085 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/4d019217366b47d09037bb62dc28cea8, entries=150, sequenceid=121, filesize=11.7 K 2024-11-20T15:23:38,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/5a28ccb1e64e4cb386df1c37b73c63f2 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/5a28ccb1e64e4cb386df1c37b73c63f2 2024-11-20T15:23:38,099 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/5a28ccb1e64e4cb386df1c37b73c63f2, entries=150, sequenceid=121, filesize=11.7 K 2024-11-20T15:23:38,108 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b9a29e8e077d15da0686c3415c44e050 in 216ms, sequenceid=121, compaction requested=true 2024-11-20T15:23:38,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:38,109 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:38,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:38,110 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 127375 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:38,111 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/A is initiating minor compaction (all files) 2024-11-20T15:23:38,111 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/A in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:38,111 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/08135c9f4f5045eb808024472faab16e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/47f2ad5ae41d4ad886fb113ad415bfd8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/8e0aa2d88527499fa3a07443352c44fa] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=124.4 K 2024-11-20T15:23:38,111 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:38,111 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/08135c9f4f5045eb808024472faab16e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/47f2ad5ae41d4ad886fb113ad415bfd8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/8e0aa2d88527499fa3a07443352c44fa] 2024-11-20T15:23:38,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:38,111 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:38,111 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08135c9f4f5045eb808024472faab16e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732116217183 2024-11-20T15:23:38,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:38,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:38,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:38,112 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47f2ad5ae41d4ad886fb113ad415bfd8, keycount=300, bloomtype=ROW, size=55.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732116217194 2024-11-20T15:23:38,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:38,112 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:38,112 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e0aa2d88527499fa3a07443352c44fa, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732116217577 2024-11-20T15:23:38,112 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/B is initiating minor compaction (all files) 2024-11-20T15:23:38,112 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/B in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:38,112 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/1499798425a344f491d2463dd3be8d87, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/9a97685e32c741efbcb963ce6da24567, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/4d019217366b47d09037bb62dc28cea8] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=35.3 K 2024-11-20T15:23:38,113 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 1499798425a344f491d2463dd3be8d87, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732116217183 2024-11-20T15:23:38,113 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a97685e32c741efbcb963ce6da24567, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732116217513 2024-11-20T15:23:38,113 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d019217366b47d09037bb62dc28cea8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732116217579 2024-11-20T15:23:38,138 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#B#compaction#567 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:38,139 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/45c57cf3511642499ff4971e4e1060d4 is 50, key is test_row_0/B:col10/1732116217891/Put/seqid=0 2024-11-20T15:23:38,150 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:38,165 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411209d59808306de409b9a9b418f04eebd40_b9a29e8e077d15da0686c3415c44e050 store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:38,166 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411209d59808306de409b9a9b418f04eebd40_b9a29e8e077d15da0686c3415c44e050, store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:38,167 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209d59808306de409b9a9b418f04eebd40_b9a29e8e077d15da0686c3415c44e050 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:38,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742480_1656 (size=12241) 2024-11-20T15:23:38,189 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/45c57cf3511642499ff4971e4e1060d4 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/45c57cf3511642499ff4971e4e1060d4 2024-11-20T15:23:38,195 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/B of b9a29e8e077d15da0686c3415c44e050 into 45c57cf3511642499ff4971e4e1060d4(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:38,195 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:38,195 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/B, priority=13, startTime=1732116218111; duration=0sec 2024-11-20T15:23:38,195 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:38,195 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:B 2024-11-20T15:23:38,195 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:38,198 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:38,198 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/C is initiating minor compaction (all files) 2024-11-20T15:23:38,198 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/C in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:38,198 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/dda5acad070e4cc78b08196a67a0f39c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/769a8c312f814f13af2ef15d849f5b96, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/5a28ccb1e64e4cb386df1c37b73c63f2] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=35.3 K 2024-11-20T15:23:38,198 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting dda5acad070e4cc78b08196a67a0f39c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732116217183 2024-11-20T15:23:38,199 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 769a8c312f814f13af2ef15d849f5b96, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732116217513 2024-11-20T15:23:38,206 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a28ccb1e64e4cb386df1c37b73c63f2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732116217579 2024-11-20T15:23:38,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=175 2024-11-20T15:23:38,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742481_1657 (size=4469) 2024-11-20T15:23:38,214 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-20T15:23:38,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:38,214 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:23:38,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:38,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:38,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:38,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:38,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:38,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:38,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:38,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:38,228 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#C#compaction#569 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:38,229 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/2212a25e1e6d4aa6a78cfcb3e88751e8 is 50, key is test_row_0/C:col10/1732116217891/Put/seqid=0 2024-11-20T15:23:38,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c5461348f0c649f0bdc446de3fc88290_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116217901/Put/seqid=0 2024-11-20T15:23:38,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742483_1659 (size=12304) 2024-11-20T15:23:38,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116278254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116278255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116278256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116278258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742482_1658 (size=12241) 2024-11-20T15:23:38,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116278263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116278362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116278362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116278363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116278364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116278366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,386 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T15:23:38,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T15:23:38,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116278566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116278567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116278567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116278569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116278569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,615 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#A#compaction#568 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:38,616 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/ac09a5e1893044dcb5ed373402f532b3 is 175, key is test_row_0/A:col10/1732116217891/Put/seqid=0 2024-11-20T15:23:38,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742484_1660 (size=31195) 2024-11-20T15:23:38,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:38,658 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c5461348f0c649f0bdc446de3fc88290_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c5461348f0c649f0bdc446de3fc88290_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:38,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/f52cd2c506d54eb58db048b806cf60f8, store: 
[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:38,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/f52cd2c506d54eb58db048b806cf60f8 is 175, key is test_row_0/A:col10/1732116217901/Put/seqid=0 2024-11-20T15:23:38,670 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/2212a25e1e6d4aa6a78cfcb3e88751e8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/2212a25e1e6d4aa6a78cfcb3e88751e8 2024-11-20T15:23:38,676 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/C of b9a29e8e077d15da0686c3415c44e050 into 2212a25e1e6d4aa6a78cfcb3e88751e8(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:38,676 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:38,676 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/C, priority=13, startTime=1732116218112; duration=0sec 2024-11-20T15:23:38,676 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:38,676 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:C 2024-11-20T15:23:38,681 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T15:23:38,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742485_1661 (size=31105) 2024-11-20T15:23:38,682 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=134, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/f52cd2c506d54eb58db048b806cf60f8 2024-11-20T15:23:38,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/8244469008ba4b408e84284c80396e96 is 50, key is test_row_0/B:col10/1732116217901/Put/seqid=0 2024-11-20T15:23:38,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742486_1662 (size=12151) 2024-11-20T15:23:38,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116278868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116278872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116278872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116278872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:38,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:38,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116278874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T15:23:39,036 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/ac09a5e1893044dcb5ed373402f532b3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ac09a5e1893044dcb5ed373402f532b3 2024-11-20T15:23:39,040 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/A of b9a29e8e077d15da0686c3415c44e050 into ac09a5e1893044dcb5ed373402f532b3(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
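The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting writes while the region's memstore is over its blocking limit (reported here as 512.0 K). That limit is the per-region memstore flush size multiplied by the block multiplier; puts are refused until flushes bring the memstore back under it. The sketch below is illustrative only: the values are hypothetical, chosen purely so that their product matches the 512 K figure printed in the log, and are not taken from the test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Hypothetical values: 128 KB * 4 = 512 KB, matching the
    // "Over memstore limit=512.0 K" figure in the log above.
    // The real test settings are not shown in this log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    // HRegion blocks writes once the region memstore exceeds
    // flush size * multiplier, throwing RegionTooBusyException from
    // checkResources until a flush catches up.
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore limit per region: " + blockingLimit + " bytes");
  }
}

The writers evidently keep retrying after each rejection: the same client connections (for example 172.17.0.2:35576 and :35590) reappear in the log with steadily increasing callIds on each new Mutate attempt.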
2024-11-20T15:23:39,040 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:39,040 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/A, priority=13, startTime=1732116218108; duration=0sec 2024-11-20T15:23:39,040 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:39,040 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:A 2024-11-20T15:23:39,189 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/8244469008ba4b408e84284c80396e96 2024-11-20T15:23:39,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/1f3eda66c97c4598a102868c8707b000 is 50, key is test_row_0/C:col10/1732116217901/Put/seqid=0 2024-11-20T15:23:39,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742487_1663 (size=12151) 2024-11-20T15:23:39,237 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/1f3eda66c97c4598a102868c8707b000 2024-11-20T15:23:39,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/f52cd2c506d54eb58db048b806cf60f8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/f52cd2c506d54eb58db048b806cf60f8 2024-11-20T15:23:39,249 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/f52cd2c506d54eb58db048b806cf60f8, entries=150, sequenceid=134, filesize=30.4 K 2024-11-20T15:23:39,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/8244469008ba4b408e84284c80396e96 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/8244469008ba4b408e84284c80396e96 2024-11-20T15:23:39,257 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/8244469008ba4b408e84284c80396e96, entries=150, sequenceid=134, filesize=11.9 K 2024-11-20T15:23:39,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/1f3eda66c97c4598a102868c8707b000 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/1f3eda66c97c4598a102868c8707b000 2024-11-20T15:23:39,263 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/1f3eda66c97c4598a102868c8707b000, entries=150, sequenceid=134, filesize=11.9 K 2024-11-20T15:23:39,264 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b9a29e8e077d15da0686c3415c44e050 in 1050ms, sequenceid=134, compaction requested=false 2024-11-20T15:23:39,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:39,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
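The flush tracked above runs as a master procedure: pid=175 is the table-level flush that MasterRpcServices keeps polling ("Checking to see if procedure is done pid=175"), and pid=176 is the per-region subprocedure executed on the region server under RS_FLUSH_OPERATIONS. A flush of this kind can be requested through the client Admin API; the minimal sketch below is illustrative only and is not taken from the test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Synchronous: asks the master to flush every region of the table
      // and waits for the resulting flush procedure to complete.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

A synchronous flush request of this shape would explain the repeated pid=175 polling seen above and the "Finished pid=176 ... Finished pid=175" procedure entries that follow.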
2024-11-20T15:23:39,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-11-20T15:23:39,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-11-20T15:23:39,269 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-20T15:23:39,269 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3570 sec 2024-11-20T15:23:39,271 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 1.3640 sec 2024-11-20T15:23:39,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T15:23:39,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:39,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:39,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:39,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:39,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:39,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:39,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:39,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:39,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116279389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:39,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:39,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116279389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:39,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:39,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116279391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:39,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:39,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116279394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:39,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:39,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116279395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:39,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209cf3aee0781b4dc0b60641612e8ac651_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116218257/Put/seqid=0 2024-11-20T15:23:39,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742488_1664 (size=12304) 2024-11-20T15:23:39,425 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,430 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209cf3aee0781b4dc0b60641612e8ac651_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209cf3aee0781b4dc0b60641612e8ac651_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:39,431 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/6a7a8fef53124bb0a9142d69878d08f7, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:39,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/6a7a8fef53124bb0a9142d69878d08f7 is 175, key is test_row_0/A:col10/1732116218257/Put/seqid=0 2024-11-20T15:23:39,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742489_1665 (size=31105) 2024-11-20T15:23:39,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:39,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116279497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:39,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:39,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116279498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:39,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:39,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116279498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:39,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:39,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116279499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:39,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:39,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116279499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:39,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:39,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116279702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:39,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:39,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116279703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:39,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:39,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116279703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:39,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:39,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116279704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:39,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:39,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116279704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:39,861 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=162, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/6a7a8fef53124bb0a9142d69878d08f7 2024-11-20T15:23:39,869 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/85b7160fabce43babca87488befefe65 is 50, key is test_row_0/B:col10/1732116218257/Put/seqid=0 2024-11-20T15:23:39,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742490_1666 (size=12151) 2024-11-20T15:23:39,903 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/85b7160fabce43babca87488befefe65 2024-11-20T15:23:39,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/f464e768de334fc094ab6ee1a08f2ca5 is 50, key is test_row_0/C:col10/1732116218257/Put/seqid=0 2024-11-20T15:23:39,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742491_1667 (size=12151) 2024-11-20T15:23:39,948 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/f464e768de334fc094ab6ee1a08f2ca5 2024-11-20T15:23:39,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/6a7a8fef53124bb0a9142d69878d08f7 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/6a7a8fef53124bb0a9142d69878d08f7 2024-11-20T15:23:39,960 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/6a7a8fef53124bb0a9142d69878d08f7, entries=150, sequenceid=162, filesize=30.4 K 2024-11-20T15:23:39,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/85b7160fabce43babca87488befefe65 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/85b7160fabce43babca87488befefe65 2024-11-20T15:23:39,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,966 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/85b7160fabce43babca87488befefe65, entries=150, sequenceid=162, filesize=11.9 K 2024-11-20T15:23:39,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/f464e768de334fc094ab6ee1a08f2ca5 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/f464e768de334fc094ab6ee1a08f2ca5 2024-11-20T15:23:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/f464e768de334fc094ab6ee1a08f2ca5, entries=150, sequenceid=162, filesize=11.9 K 2024-11-20T15:23:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,973 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for b9a29e8e077d15da0686c3415c44e050 in 594ms, sequenceid=162, compaction requested=true 2024-11-20T15:23:39,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status 
journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:39,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:39,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:39,973 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:39,973 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:39,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:39,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:39,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:39,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:39,974 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36543 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:39,974 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93405 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:39,974 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/B is initiating minor compaction (all files) 2024-11-20T15:23:39,974 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/A is initiating minor compaction (all files) 2024-11-20T15:23:39,974 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/A in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:39,974 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/B in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:39,975 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ac09a5e1893044dcb5ed373402f532b3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/f52cd2c506d54eb58db048b806cf60f8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/6a7a8fef53124bb0a9142d69878d08f7] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=91.2 K 2024-11-20T15:23:39,975 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/45c57cf3511642499ff4971e4e1060d4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/8244469008ba4b408e84284c80396e96, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/85b7160fabce43babca87488befefe65] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=35.7 K 2024-11-20T15:23:39,975 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:39,975 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ac09a5e1893044dcb5ed373402f532b3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/f52cd2c506d54eb58db048b806cf60f8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/6a7a8fef53124bb0a9142d69878d08f7] 2024-11-20T15:23:39,975 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 45c57cf3511642499ff4971e4e1060d4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732116217579 2024-11-20T15:23:39,975 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac09a5e1893044dcb5ed373402f532b3, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732116217579 2024-11-20T15:23:39,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,975 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 8244469008ba4b408e84284c80396e96, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732116217901 2024-11-20T15:23:39,976 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting f52cd2c506d54eb58db048b806cf60f8, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732116217901 2024-11-20T15:23:39,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,976 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 85b7160fabce43babca87488befefe65, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732116218256 2024-11-20T15:23:39,976 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a7a8fef53124bb0a9142d69878d08f7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732116218256 2024-11-20T15:23:39,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,998 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#B#compaction#576 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:39,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,999 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/f66613cb22f34b709b4f6939d315cf01 is 50, key is test_row_0/B:col10/1732116218257/Put/seqid=0 2024-11-20T15:23:39,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:39,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,004 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:40,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,009 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112041cc272c43b6444cb724ed86fb892890_b9a29e8e077d15da0686c3415c44e050 store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:40,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,011 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112041cc272c43b6444cb724ed86fb892890_b9a29e8e077d15da0686c3415c44e050, store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:40,011 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112041cc272c43b6444cb724ed86fb892890_b9a29e8e077d15da0686c3415c44e050 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:40,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742492_1668 (size=12493) 2024-11-20T15:23:40,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T15:23:40,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:40,012 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:23:40,012 INFO [Thread-2754 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-20T15:23:40,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:40,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:40,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:40,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:40,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:40,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:40,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,015 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-11-20T15:23:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T15:23:40,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,018 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:40,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,019 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:40,019 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:40,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742493_1669 (size=4469) 2024-11-20T15:23:40,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:40,047 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f516a2ff5ee24cb69e32b9df8ef86094_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116220008/Put/seqid=0 2024-11-20T15:23:40,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742494_1670 (size=12304) 2024-11-20T15:23:40,064 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:40,068 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f516a2ff5ee24cb69e32b9df8ef86094_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f516a2ff5ee24cb69e32b9df8ef86094_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:40,069 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/c9f2093150e2406e98960cb3036def06, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:40,069 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/c9f2093150e2406e98960cb3036def06 is 175, key is 
test_row_0/A:col10/1732116220008/Put/seqid=0 2024-11-20T15:23:40,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742495_1671 (size=31105) 2024-11-20T15:23:40,079 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=173, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/c9f2093150e2406e98960cb3036def06 2024-11-20T15:23:40,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/3ef0eb2ae314437db15a8ba5f1748c34 is 50, key is test_row_0/B:col10/1732116220008/Put/seqid=0 2024-11-20T15:23:40,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742496_1672 (size=12151) 2024-11-20T15:23:40,089 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/3ef0eb2ae314437db15a8ba5f1748c34 2024-11-20T15:23:40,095 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/c1e67c0aa4d2472e809c3e9fa2add2c4 is 50, key is test_row_0/C:col10/1732116220008/Put/seqid=0 2024-11-20T15:23:40,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742497_1673 (size=12151) 2024-11-20T15:23:40,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116280055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116280056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116280103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116280105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116280105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T15:23:40,171 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T15:23:40,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:40,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:40,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:40,172 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:40,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:40,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:40,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116280207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116280207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116280207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116280208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116280208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T15:23:40,323 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T15:23:40,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:40,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:40,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:40,324 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:40,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:40,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:40,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116280410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116280411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116280412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116280412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116280412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,418 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/f66613cb22f34b709b4f6939d315cf01 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/f66613cb22f34b709b4f6939d315cf01 2024-11-20T15:23:40,425 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/B of b9a29e8e077d15da0686c3415c44e050 into f66613cb22f34b709b4f6939d315cf01(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
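The repeated RegionTooBusyException warnings above are the region blocking writers because its memstore has grown past the blocking limit, which is the per-region flush size multiplied by the block multiplier. The 512.0 K limit reported here suggests the test shrank the flush size far below the 128 MB production default; that inference is an assumption. A minimal sketch of how the two configuration keys combine:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Production defaults: 128 MB flush size, multiplier of 4. A 512 K limit, as in
    // this log, would correspond to something like a 128 K flush size (assumption).
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    // Writes are rejected with RegionTooBusyException ("Over memstore limit" above)
    // once the region's memstore exceeds flushSize * multiplier, until a flush catches up.
    System.out.println("memstore blocking limit = " + (flushSize * multiplier) + " bytes");
  }
}
```

The clients in the log simply see the exception on each retried Mutate call (note the increasing callIds per connection), which is consistent with ordinary client retry behaviour rather than a hard failure.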
2024-11-20T15:23:40,425 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:40,425 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/B, priority=13, startTime=1732116219973; duration=0sec 2024-11-20T15:23:40,426 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:40,426 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:B 2024-11-20T15:23:40,426 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:40,427 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36543 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:40,427 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/C is initiating minor compaction (all files) 2024-11-20T15:23:40,427 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/C in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:40,427 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/2212a25e1e6d4aa6a78cfcb3e88751e8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/1f3eda66c97c4598a102868c8707b000, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/f464e768de334fc094ab6ee1a08f2ca5] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=35.7 K 2024-11-20T15:23:40,428 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 2212a25e1e6d4aa6a78cfcb3e88751e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732116217579 2024-11-20T15:23:40,428 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f3eda66c97c4598a102868c8707b000, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732116217901 2024-11-20T15:23:40,428 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting f464e768de334fc094ab6ee1a08f2ca5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732116218256 2024-11-20T15:23:40,436 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
b9a29e8e077d15da0686c3415c44e050#C#compaction#581 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:40,436 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/e1c9698fe0a44000b830663d9255ad89 is 50, key is test_row_0/C:col10/1732116218257/Put/seqid=0 2024-11-20T15:23:40,444 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#A#compaction#577 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:40,444 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/46577b5ff4684cd395ae2955f51830d8 is 175, key is test_row_0/A:col10/1732116218257/Put/seqid=0 2024-11-20T15:23:40,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742498_1674 (size=12493) 2024-11-20T15:23:40,459 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/e1c9698fe0a44000b830663d9255ad89 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/e1c9698fe0a44000b830663d9255ad89 2024-11-20T15:23:40,464 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/C of b9a29e8e077d15da0686c3415c44e050 into e1c9698fe0a44000b830663d9255ad89(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
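For the compaction entries above: ExploringCompactionPolicy selected all three eligible files for store C (12.0 K + 11.9 K + 11.9 K, the 36543 bytes / ~35.7 K in the selection entry) and rewrote them into a single ~12.2 K file, since the test rows overlap heavily. The sketch below is a generic client-side way to request such a compaction; it is illustrative only and not something this test needs, because the region server queued the compaction itself.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Asks the region server to compact family C of the table; the server-side
      // policy (ExploringCompactionPolicy in the log) still decides which files to merge.
      admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("C"));
    }
  }
}
```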
2024-11-20T15:23:40,464 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:40,464 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/C, priority=13, startTime=1732116219974; duration=0sec 2024-11-20T15:23:40,464 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:40,464 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:C 2024-11-20T15:23:40,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742499_1675 (size=31447) 2024-11-20T15:23:40,479 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/46577b5ff4684cd395ae2955f51830d8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/46577b5ff4684cd395ae2955f51830d8 2024-11-20T15:23:40,485 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,486 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T15:23:40,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:40,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:40,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:40,486 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:40,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:40,488 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/A of b9a29e8e077d15da0686c3415c44e050 into 46577b5ff4684cd395ae2955f51830d8(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:40,488 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:40,488 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/A, priority=13, startTime=1732116219973; duration=0sec 2024-11-20T15:23:40,488 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:40,488 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:A 2024-11-20T15:23:40,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:40,500 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/c1e67c0aa4d2472e809c3e9fa2add2c4
2024-11-20T15:23:40,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/c9f2093150e2406e98960cb3036def06 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c9f2093150e2406e98960cb3036def06
2024-11-20T15:23:40,512 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c9f2093150e2406e98960cb3036def06, entries=150, sequenceid=173, filesize=30.4 K
2024-11-20T15:23:40,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/3ef0eb2ae314437db15a8ba5f1748c34 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/3ef0eb2ae314437db15a8ba5f1748c34
2024-11-20T15:23:40,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/3ef0eb2ae314437db15a8ba5f1748c34, entries=150, sequenceid=173, filesize=11.9 K
2024-11-20T15:23:40,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/c1e67c0aa4d2472e809c3e9fa2add2c4 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/c1e67c0aa4d2472e809c3e9fa2add2c4
2024-11-20T15:23:40,525 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/c1e67c0aa4d2472e809c3e9fa2add2c4, entries=150, sequenceid=173, filesize=11.9 K
2024-11-20T15:23:40,526 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b9a29e8e077d15da0686c3415c44e050 in 514ms, sequenceid=173, compaction requested=false
2024-11-20T15:23:40,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050:
2024-11-20T15:23:40,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177
2024-11-20T15:23:40,640 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954
2024-11-20T15:23:40,641 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178
2024-11-20T15:23:40,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.
2024-11-20T15:23:40,641 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-11-20T15:23:40,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A
2024-11-20T15:23:40,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:23:40,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B
2024-11-20T15:23:40,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:23:40,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C
2024-11-20T15:23:40,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:23:40,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201eb02666e07c4d3a851a5a762702cbfd_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116220050/Put/seqid=0
2024-11-20T15:23:40,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742500_1676 (size=12304)
2024-11-20T15:23:40,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T15:23:40,698 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201eb02666e07c4d3a851a5a762702cbfd_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201eb02666e07c4d3a851a5a762702cbfd_b9a29e8e077d15da0686c3415c44e050
2024-11-20T15:23:40,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/166449c9a037499588eca55a8c870eb5, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:40,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/166449c9a037499588eca55a8c870eb5 is 175, key is test_row_0/A:col10/1732116220050/Put/seqid=0 2024-11-20T15:23:40,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:40,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:40,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742501_1677 (size=31105) 2024-11-20T15:23:40,724 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=201, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/166449c9a037499588eca55a8c870eb5 2024-11-20T15:23:40,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116280723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116280725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116280727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116280727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116280730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/f7977095500a43eb8ebf2bc8f9b40c6c is 50, key is test_row_0/B:col10/1732116220050/Put/seqid=0 2024-11-20T15:23:40,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742502_1678 (size=12151) 2024-11-20T15:23:40,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116280831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116280831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116280832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116280833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:40,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:40,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116280834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116281034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116281036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116281037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116281037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T15:23:41,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116281038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954
2024-11-20T15:23:41,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177
2024-11-20T15:23:41,185 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/f7977095500a43eb8ebf2bc8f9b40c6c
2024-11-20T15:23:41,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/2098147f60fd494d986e487bcf450bba is 50, key is test_row_0/C:col10/1732116220050/Put/seqid=0
2024-11-20T15:23:41,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742503_1679 (size=12151)
2024-11-20T15:23:41,243 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/2098147f60fd494d986e487bcf450bba
2024-11-20T15:23:41,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/166449c9a037499588eca55a8c870eb5 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/166449c9a037499588eca55a8c870eb5
2024-11-20T15:23:41,260 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/166449c9a037499588eca55a8c870eb5, entries=150, sequenceid=201, filesize=30.4 K
2024-11-20T15:23:41,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/f7977095500a43eb8ebf2bc8f9b40c6c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/f7977095500a43eb8ebf2bc8f9b40c6c
2024-11-20T15:23:41,269 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/f7977095500a43eb8ebf2bc8f9b40c6c, entries=150, sequenceid=201, filesize=11.9 K
2024-11-20T15:23:41,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/2098147f60fd494d986e487bcf450bba as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/2098147f60fd494d986e487bcf450bba
2024-11-20T15:23:41,291 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/2098147f60fd494d986e487bcf450bba, entries=150, sequenceid=201, filesize=11.9 K
2024-11-20T15:23:41,293 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b9a29e8e077d15da0686c3415c44e050 in 652ms, sequenceid=201, compaction requested=true
2024-11-20T15:23:41,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050:
2024-11-20T15:23:41,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.
2024-11-20T15:23:41,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-20T15:23:41,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-11-20T15:23:41,296 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-20T15:23:41,296 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2750 sec 2024-11-20T15:23:41,297 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 1.2810 sec 2024-11-20T15:23:41,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:41,342 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T15:23:41,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:41,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:41,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:41,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:41,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:41,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:41,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201d331a0abb0a45fda0b01b1fe802af69_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116220725/Put/seqid=0 2024-11-20T15:23:41,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116281366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116281367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116281369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116281371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116281372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742504_1680 (size=12304) 2024-11-20T15:23:41,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116281474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116281474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116281475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116281476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116281477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116281677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116281680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116281680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116281682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116281682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,791 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:41,794 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201d331a0abb0a45fda0b01b1fe802af69_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201d331a0abb0a45fda0b01b1fe802af69_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:41,795 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/c0f04c54eea7410b8c0009fe8d517f9a, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:41,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/c0f04c54eea7410b8c0009fe8d517f9a is 175, key is test_row_0/A:col10/1732116220725/Put/seqid=0 2024-11-20T15:23:41,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742505_1681 (size=31105) 2024-11-20T15:23:41,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116281982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116281983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116281984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116281987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:41,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:41,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116281987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:42,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T15:23:42,121 INFO [Thread-2754 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-11-20T15:23:42,122 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:42,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees 2024-11-20T15:23:42,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T15:23:42,123 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:42,124 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:42,124 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:42,199 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/c0f04c54eea7410b8c0009fe8d517f9a 2024-11-20T15:23:42,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/0332e5125b5c470a934b1ab256552569 is 50, key is test_row_0/B:col10/1732116220725/Put/seqid=0 2024-11-20T15:23:42,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742506_1682 
(size=12151) 2024-11-20T15:23:42,208 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/0332e5125b5c470a934b1ab256552569 2024-11-20T15:23:42,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/41637198ebc14780af436bf1b494375c is 50, key is test_row_0/C:col10/1732116220725/Put/seqid=0 2024-11-20T15:23:42,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742507_1683 (size=12151) 2024-11-20T15:23:42,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T15:23:42,275 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:42,275 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T15:23:42,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:42,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:42,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:42,277 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:42,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:42,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:42,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T15:23:42,429 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:42,429 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T15:23:42,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:42,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:42,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:42,429 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:42,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:42,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:42,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:42,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116282485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:42,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:42,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116282486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:42,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:42,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116282487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:42,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:42,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116282490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:42,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:42,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116282492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:42,581 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:42,581 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T15:23:42,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:42,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:42,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:42,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:42,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:42,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:42,616 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/41637198ebc14780af436bf1b494375c 2024-11-20T15:23:42,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/c0f04c54eea7410b8c0009fe8d517f9a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c0f04c54eea7410b8c0009fe8d517f9a 2024-11-20T15:23:42,623 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c0f04c54eea7410b8c0009fe8d517f9a, entries=150, sequenceid=213, filesize=30.4 K 2024-11-20T15:23:42,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/0332e5125b5c470a934b1ab256552569 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/0332e5125b5c470a934b1ab256552569 2024-11-20T15:23:42,627 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/0332e5125b5c470a934b1ab256552569, entries=150, sequenceid=213, filesize=11.9 K 2024-11-20T15:23:42,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/41637198ebc14780af436bf1b494375c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/41637198ebc14780af436bf1b494375c 2024-11-20T15:23:42,630 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/41637198ebc14780af436bf1b494375c, entries=150, sequenceid=213, filesize=11.9 K 2024-11-20T15:23:42,631 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b9a29e8e077d15da0686c3415c44e050 in 1289ms, sequenceid=213, compaction requested=true 2024-11-20T15:23:42,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:42,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
b9a29e8e077d15da0686c3415c44e050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:42,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:42,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:42,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:42,631 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:23:42,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:42,631 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:23:42,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:42,632 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124762 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:23:42,632 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:23:42,632 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/B is initiating minor compaction (all files) 2024-11-20T15:23:42,632 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/A is initiating minor compaction (all files) 2024-11-20T15:23:42,632 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/A in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:42,632 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/B in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:42,632 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/46577b5ff4684cd395ae2955f51830d8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c9f2093150e2406e98960cb3036def06, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/166449c9a037499588eca55a8c870eb5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c0f04c54eea7410b8c0009fe8d517f9a] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=121.8 K 2024-11-20T15:23:42,632 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/f66613cb22f34b709b4f6939d315cf01, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/3ef0eb2ae314437db15a8ba5f1748c34, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/f7977095500a43eb8ebf2bc8f9b40c6c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/0332e5125b5c470a934b1ab256552569] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=47.8 K 2024-11-20T15:23:42,632 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:42,632 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/46577b5ff4684cd395ae2955f51830d8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c9f2093150e2406e98960cb3036def06, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/166449c9a037499588eca55a8c870eb5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c0f04c54eea7410b8c0009fe8d517f9a] 2024-11-20T15:23:42,633 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46577b5ff4684cd395ae2955f51830d8, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732116218256 2024-11-20T15:23:42,633 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting f66613cb22f34b709b4f6939d315cf01, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732116218256 2024-11-20T15:23:42,633 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ef0eb2ae314437db15a8ba5f1748c34, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732116219388 2024-11-20T15:23:42,633 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9f2093150e2406e98960cb3036def06, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732116219388 2024-11-20T15:23:42,633 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting f7977095500a43eb8ebf2bc8f9b40c6c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732116220050 2024-11-20T15:23:42,633 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 166449c9a037499588eca55a8c870eb5, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732116220050 2024-11-20T15:23:42,634 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 0332e5125b5c470a934b1ab256552569, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732116220725 2024-11-20T15:23:42,634 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0f04c54eea7410b8c0009fe8d517f9a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732116220725 2024-11-20T15:23:42,640 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:42,642 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#B#compaction#588 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:42,642 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/e45a00b04c5344b7a6f0aaf04c0f89f7 is 50, key is test_row_0/B:col10/1732116220725/Put/seqid=0 2024-11-20T15:23:42,642 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120afd585592713465f92c873585219fbd3_b9a29e8e077d15da0686c3415c44e050 store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:42,645 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120afd585592713465f92c873585219fbd3_b9a29e8e077d15da0686c3415c44e050, store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:42,645 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120afd585592713465f92c873585219fbd3_b9a29e8e077d15da0686c3415c44e050 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:42,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742508_1684 (size=12629) 2024-11-20T15:23:42,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742509_1685 (size=4469) 2024-11-20T15:23:42,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T15:23:42,734 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:42,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T15:23:42,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:42,735 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T15:23:42,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:42,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:42,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:42,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:42,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:42,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:42,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a12c9b1514624706924a8dc36a4eed6e_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116221370/Put/seqid=0 2024-11-20T15:23:42,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742510_1686 (size=12304) 2024-11-20T15:23:42,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:42,749 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a12c9b1514624706924a8dc36a4eed6e_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a12c9b1514624706924a8dc36a4eed6e_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:42,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/069da517b5ee4b1088061ecc7a33e5e5, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:42,750 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/069da517b5ee4b1088061ecc7a33e5e5 is 175, key is test_row_0/A:col10/1732116221370/Put/seqid=0 2024-11-20T15:23:42,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742511_1687 (size=31105) 2024-11-20T15:23:43,052 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#A#compaction#589 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:43,052 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/c3a3fa1d74844fa49c9a5a95e7112b21 is 175, key is test_row_0/A:col10/1732116220725/Put/seqid=0 2024-11-20T15:23:43,054 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/e45a00b04c5344b7a6f0aaf04c0f89f7 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/e45a00b04c5344b7a6f0aaf04c0f89f7 2024-11-20T15:23:43,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742512_1688 (size=31583) 2024-11-20T15:23:43,059 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/B of b9a29e8e077d15da0686c3415c44e050 into e45a00b04c5344b7a6f0aaf04c0f89f7(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:43,059 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:43,059 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/B, priority=12, startTime=1732116222631; duration=0sec 2024-11-20T15:23:43,059 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:43,059 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:B 2024-11-20T15:23:43,059 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:23:43,059 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/c3a3fa1d74844fa49c9a5a95e7112b21 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c3a3fa1d74844fa49c9a5a95e7112b21 2024-11-20T15:23:43,060 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:23:43,060 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/C is initiating minor compaction (all files) 2024-11-20T15:23:43,060 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/C in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:43,060 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/e1c9698fe0a44000b830663d9255ad89, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/c1e67c0aa4d2472e809c3e9fa2add2c4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/2098147f60fd494d986e487bcf450bba, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/41637198ebc14780af436bf1b494375c] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=47.8 K 2024-11-20T15:23:43,060 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting e1c9698fe0a44000b830663d9255ad89, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732116218256 2024-11-20T15:23:43,061 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting c1e67c0aa4d2472e809c3e9fa2add2c4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732116219388 2024-11-20T15:23:43,061 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 2098147f60fd494d986e487bcf450bba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732116220050 2024-11-20T15:23:43,061 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 41637198ebc14780af436bf1b494375c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732116220725 2024-11-20T15:23:43,062 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/A of b9a29e8e077d15da0686c3415c44e050 into c3a3fa1d74844fa49c9a5a95e7112b21(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:43,063 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:43,063 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/A, priority=12, startTime=1732116222631; duration=0sec 2024-11-20T15:23:43,063 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:43,063 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:A 2024-11-20T15:23:43,069 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#C#compaction#591 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:43,070 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/25153a7be7824be7953e39337adb28a7 is 50, key is test_row_0/C:col10/1732116220725/Put/seqid=0 2024-11-20T15:23:43,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742513_1689 (size=12629) 2024-11-20T15:23:43,154 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=237, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/069da517b5ee4b1088061ecc7a33e5e5 2024-11-20T15:23:43,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/636a23a06d664a5b91ac46e0dc4148e1 is 50, key is test_row_0/B:col10/1732116221370/Put/seqid=0 2024-11-20T15:23:43,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742514_1690 (size=12151) 2024-11-20T15:23:43,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T15:23:43,478 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/25153a7be7824be7953e39337adb28a7 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/25153a7be7824be7953e39337adb28a7 2024-11-20T15:23:43,483 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/C of b9a29e8e077d15da0686c3415c44e050 into 25153a7be7824be7953e39337adb28a7(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:43,483 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:43,483 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/C, priority=12, startTime=1732116222631; duration=0sec 2024-11-20T15:23:43,483 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:43,483 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:C 2024-11-20T15:23:43,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:43,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:43,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:43,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116283500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:43,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:43,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116283501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:43,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:43,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116283502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:43,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:43,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116283503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:43,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:43,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116283503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:43,575 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/636a23a06d664a5b91ac46e0dc4148e1 2024-11-20T15:23:43,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/146eae16457c46a89871dce8bfcb5a58 is 50, key is test_row_0/C:col10/1732116221370/Put/seqid=0 2024-11-20T15:23:43,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742515_1691 (size=12151) 2024-11-20T15:23:43,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:43,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116283604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:43,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:43,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116283604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:43,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:43,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116283605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:43,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:43,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116283606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:43,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:43,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116283805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:43,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:43,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116283806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:43,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:43,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116283807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:43,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:43,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116283807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:43,985 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/146eae16457c46a89871dce8bfcb5a58 2024-11-20T15:23:43,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/069da517b5ee4b1088061ecc7a33e5e5 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/069da517b5ee4b1088061ecc7a33e5e5 2024-11-20T15:23:43,992 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/069da517b5ee4b1088061ecc7a33e5e5, entries=150, sequenceid=237, filesize=30.4 K 2024-11-20T15:23:43,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/636a23a06d664a5b91ac46e0dc4148e1 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/636a23a06d664a5b91ac46e0dc4148e1 2024-11-20T15:23:43,996 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/636a23a06d664a5b91ac46e0dc4148e1, entries=150, sequenceid=237, filesize=11.9 K 2024-11-20T15:23:43,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/146eae16457c46a89871dce8bfcb5a58 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/146eae16457c46a89871dce8bfcb5a58 2024-11-20T15:23:44,000 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/146eae16457c46a89871dce8bfcb5a58, entries=150, sequenceid=237, filesize=11.9 K 2024-11-20T15:23:44,001 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for b9a29e8e077d15da0686c3415c44e050 in 1267ms, sequenceid=237, compaction requested=false 2024-11-20T15:23:44,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:44,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
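For context on the RegionTooBusyException entries above: HRegion.checkResources rejects a write once the region's memstore passes its blocking threshold, which is the configured flush size multiplied by hbase.hregion.memstore.block.multiplier; the 512.0 K limit seen in this run reflects a deliberately small test-sized setting, and writes are rejected until a flush such as the one just logged brings the memstore back under the limit. A minimal sketch of configuring such a limit for a test cluster follows; the 128 KB / 4x split is an assumed illustration, not a value read from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  /** Builds a configuration whose memstore blocking limit works out to 512 KB (assumed values). */
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (assumed test-sized value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block writes (RegionTooBusyException from HRegion.checkResources) at 4x the flush size.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}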
2024-11-20T15:23:44,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=180 2024-11-20T15:23:44,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=180 2024-11-20T15:23:44,003 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-11-20T15:23:44,003 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8780 sec 2024-11-20T15:23:44,005 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees in 1.8820 sec 2024-11-20T15:23:44,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:44,111 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T15:23:44,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:44,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:44,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:44,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:44,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:44,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:44,117 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a5143477ab7b4735ad6a60f7838252d7_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116224110/Put/seqid=0 2024-11-20T15:23:44,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742516_1692 (size=14794) 2024-11-20T15:23:44,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:44,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116284126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:44,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116284158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:44,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116284158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:44,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116284158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T15:23:44,227 INFO [Thread-2754 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-11-20T15:23:44,228 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:44,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees 2024-11-20T15:23:44,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T15:23:44,230 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:44,230 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:44,230 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:44,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:44,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116284259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:44,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116284262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:44,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116284262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:44,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116284262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T15:23:44,381 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,382 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-20T15:23:44,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:44,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:44,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:44,382 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
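The pid=182 failure above is expected behaviour rather than data loss: FlushRegionCallable declines to start a second flush while the MemStoreFlusher-triggered flush is still running ("NOT flushing ... as already flushing"), reports an IOException back to the master, and the master keeps re-dispatching the FlushRegionProcedure until a flush can actually run. From the client's point of view the whole exchange sits behind a single admin call, roughly as in this sketch (the table name is taken from the log; the rest is illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a FlushTableProcedure on the master and waits for it and its
      // per-region FlushRegionProcedure children to finish, as for pid=179/180 above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}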
2024-11-20T15:23:44,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:44,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:44,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:44,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116284461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:44,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:44,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116284464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116284464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:44,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116284464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,521 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:44,524 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a5143477ab7b4735ad6a60f7838252d7_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a5143477ab7b4735ad6a60f7838252d7_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:44,525 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/8f903770642941c1b91dc1b28ec05f3c, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:44,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/8f903770642941c1b91dc1b28ec05f3c is 175, key is test_row_0/A:col10/1732116224110/Put/seqid=0 2024-11-20T15:23:44,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742517_1693 (size=39749) 2024-11-20T15:23:44,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T15:23:44,534 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,534 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-20T15:23:44,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:44,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:44,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:44,535 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:44,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
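One detail in the surrounding flush records: column family A is flushed through DefaultMobStoreFlusher, so cell values above the MOB threshold go into a MOB file under mobdir while the regular store file under .tmp/A/ holds reference cells, consistent with the larger per-cell length reported for A (175 bytes in the .tmp/A file versus 50 bytes in the mobdir file). A hedged sketch of declaring such a schema is below; the MOB threshold value is an assumption for illustration, not a value taken from the test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void create(Admin admin) throws java.io.IOException {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    // Family A keeps values above the threshold as MOB cells in mobdir, which is
    // why HMobStore/DefaultMobStoreFlusher appear for A in this log.
    table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)
        .setMobThreshold(1024L) // assumed threshold, for illustration only
        .build());
    // B and C flush through the default store flusher.
    table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
    table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
    admin.createTable(table.build());
  }
}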
2024-11-20T15:23:44,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:44,686 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-20T15:23:44,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:44,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
as already flushing 2024-11-20T15:23:44,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:44,687 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:44,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:44,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:44,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:44,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116284764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:44,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116284767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:44,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116284773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:44,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116284773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T15:23:44,839 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,839 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-20T15:23:44,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:44,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:44,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:44,840 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
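
[Annotation] The entries above show the remote flush procedure (pid=182) being dispatched to the region server, refused with "NOT flushing ... as already flushing", reported back as "Unable to complete flush", and then re-dispatched by the master. The following is a minimal, self-contained Java sketch of that dispatch-refuse-retry pattern; it is illustrative only (class and method names here are invented, not HBase source), assuming the callee simply rejects the request while a flush is already in flight.

// Simplified sketch of the pid=182 retry loop visible in the log above.
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

public class FlushRetrySketch {

    // Stand-in for the region state: true while a flush is already running.
    static final AtomicBoolean alreadyFlushing = new AtomicBoolean(true);

    // Stand-in for the flush callable: refuses while a flush is in flight,
    // matching "NOT flushing ... as already flushing" / "Unable to complete flush".
    static void flushRegion(String region) throws IOException {
        if (alreadyFlushing.get()) {
            throw new IOException("Unable to complete flush " + region);
        }
        System.out.println("Flushed " + region);
    }

    public static void main(String[] args) throws Exception {
        String region = "b9a29e8e077d15da0686c3415c44e050";
        for (int attempt = 1; attempt <= 5; attempt++) {
            try {
                flushRegion(region);
                break;                              // procedure done
            } catch (IOException e) {
                // Master side: "Remote procedure failed, pid=..." -> back off, re-dispatch.
                System.out.println("attempt " + attempt + " failed: " + e.getMessage());
                Thread.sleep(100L * attempt);
                if (attempt == 3) {
                    alreadyFlushing.set(false);     // the in-flight flush eventually finishes
                }
            }
        }
    }
}
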
2024-11-20T15:23:44,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:44,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:44,929 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=255, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/8f903770642941c1b91dc1b28ec05f3c 2024-11-20T15:23:44,935 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/ec57e53f7fbc4ee8b0dc26fbfaeb8a01 is 50, key is test_row_0/B:col10/1732116224110/Put/seqid=0 2024-11-20T15:23:44,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742518_1694 (size=12151) 2024-11-20T15:23:44,992 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:44,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-20T15:23:44,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:44,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:44,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:44,992 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:44,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:44,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:45,144 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:45,145 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-20T15:23:45,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:45,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:45,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:45,145 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:45,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:45,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:45,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:45,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116285271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:45,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:45,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116285278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:45,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:45,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116285279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:45,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:45,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116285280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:45,297 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:45,297 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-20T15:23:45,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:45,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:45,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:45,298 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
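
[Annotation] The repeated RegionTooBusyException entries above come from the resource check performed before each put: while the region's memstore is above its blocking limit (512.0 K in this test), mutations are rejected and a flush is expected to drain the memstore. A minimal sketch of that backpressure check follows; it is an assumed simplification for illustration, not the actual HRegion.checkResources implementation, and the class/constant names are invented.

// Simplified memstore backpressure sketch mirroring the checkResources() step in the traces.
import java.util.concurrent.atomic.AtomicLong;

public class MemstoreBackpressureSketch {

    // Local stand-in for the real exception type.
    static class RegionTooBusyException extends RuntimeException {
        RegionTooBusyException(String msg) { super(msg); }
    }

    static final long BLOCKING_LIMIT = 512 * 1024;          // 512.0 K, as in the log
    static final AtomicLong memstoreSize = new AtomicLong();

    static void requestFlush() {
        System.out.println("flush requested, memstore=" + memstoreSize.get());
    }

    // Called before applying a put.
    static void checkResources(String regionName) {
        if (memstoreSize.get() > BLOCKING_LIMIT) {
            requestFlush();
            throw new RegionTooBusyException(
                "Over memstore limit=" + BLOCKING_LIMIT + ", regionName=" + regionName);
        }
    }

    static void put(String regionName, byte[] value) {
        checkResources(regionName);
        memstoreSize.addAndGet(value.length);    // accepted write grows the memstore
    }

    public static void main(String[] args) {
        String region = "b9a29e8e077d15da0686c3415c44e050";
        byte[] cell = new byte[64 * 1024];
        try {
            for (int i = 0; i < 20; i++) {
                put(region, cell);               // crosses the 512 K blocking limit after a few puts
            }
        } catch (RegionTooBusyException e) {
            // The Mutate callers in the log see this and are expected to back off and retry.
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
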
2024-11-20T15:23:45,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:45,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:45,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T15:23:45,339 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/ec57e53f7fbc4ee8b0dc26fbfaeb8a01 2024-11-20T15:23:45,346 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/9d796ae710e148a69e58f91f63194903 is 50, key is test_row_0/C:col10/1732116224110/Put/seqid=0 2024-11-20T15:23:45,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742519_1695 (size=12151) 2024-11-20T15:23:45,350 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/9d796ae710e148a69e58f91f63194903 2024-11-20T15:23:45,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/8f903770642941c1b91dc1b28ec05f3c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/8f903770642941c1b91dc1b28ec05f3c 2024-11-20T15:23:45,358 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/8f903770642941c1b91dc1b28ec05f3c, entries=200, sequenceid=255, filesize=38.8 K 2024-11-20T15:23:45,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/ec57e53f7fbc4ee8b0dc26fbfaeb8a01 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ec57e53f7fbc4ee8b0dc26fbfaeb8a01 2024-11-20T15:23:45,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ec57e53f7fbc4ee8b0dc26fbfaeb8a01, entries=150, sequenceid=255, filesize=11.9 K 2024-11-20T15:23:45,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/9d796ae710e148a69e58f91f63194903 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/9d796ae710e148a69e58f91f63194903 2024-11-20T15:23:45,365 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/9d796ae710e148a69e58f91f63194903, entries=150, sequenceid=255, filesize=11.9 K 2024-11-20T15:23:45,366 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=134.18 KB/137400 for b9a29e8e077d15da0686c3415c44e050 in 1255ms, sequenceid=255, compaction requested=true 2024-11-20T15:23:45,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:45,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:45,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:45,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:45,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:45,366 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:45,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:45,366 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:45,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:45,368 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102437 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:45,368 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:45,368 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/A is initiating minor compaction (all files) 2024-11-20T15:23:45,368 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/B is initiating minor compaction (all files) 2024-11-20T15:23:45,368 INFO 
[RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/A in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:45,368 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/B in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:45,368 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c3a3fa1d74844fa49c9a5a95e7112b21, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/069da517b5ee4b1088061ecc7a33e5e5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/8f903770642941c1b91dc1b28ec05f3c] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=100.0 K 2024-11-20T15:23:45,368 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/e45a00b04c5344b7a6f0aaf04c0f89f7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/636a23a06d664a5b91ac46e0dc4148e1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ec57e53f7fbc4ee8b0dc26fbfaeb8a01] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=36.1 K 2024-11-20T15:23:45,368 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:45,368 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c3a3fa1d74844fa49c9a5a95e7112b21, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/069da517b5ee4b1088061ecc7a33e5e5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/8f903770642941c1b91dc1b28ec05f3c] 2024-11-20T15:23:45,368 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting e45a00b04c5344b7a6f0aaf04c0f89f7, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732116220725 2024-11-20T15:23:45,368 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3a3fa1d74844fa49c9a5a95e7112b21, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732116220725 2024-11-20T15:23:45,369 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 636a23a06d664a5b91ac46e0dc4148e1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732116221364 2024-11-20T15:23:45,369 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 069da517b5ee4b1088061ecc7a33e5e5, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732116221364 2024-11-20T15:23:45,370 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting ec57e53f7fbc4ee8b0dc26fbfaeb8a01, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732116223497 2024-11-20T15:23:45,370 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f903770642941c1b91dc1b28ec05f3c, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732116223497 2024-11-20T15:23:45,375 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:45,376 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#B#compaction#597 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:45,377 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/b9c95357b09a424eb1e4f0d45b9dbf53 is 50, key is test_row_0/B:col10/1732116224110/Put/seqid=0 2024-11-20T15:23:45,377 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411205de51fa5ab9644cd8a6d02d4a2070447_b9a29e8e077d15da0686c3415c44e050 store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:45,379 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411205de51fa5ab9644cd8a6d02d4a2070447_b9a29e8e077d15da0686c3415c44e050, store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:45,379 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205de51fa5ab9644cd8a6d02d4a2070447_b9a29e8e077d15da0686c3415c44e050 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:45,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742521_1697 (size=4469) 2024-11-20T15:23:45,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742520_1696 (size=12731) 2024-11-20T15:23:45,397 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/b9c95357b09a424eb1e4f0d45b9dbf53 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/b9c95357b09a424eb1e4f0d45b9dbf53 2024-11-20T15:23:45,401 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/B of b9a29e8e077d15da0686c3415c44e050 into b9c95357b09a424eb1e4f0d45b9dbf53(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
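
[Annotation] The compaction entries above report the policy selecting all 3 eligible store files for the B family (~36.1 K total) and merging them into a single 12.4 K file. The sketch below illustrates the kind of size-ratio rule such size-tiered selection uses: a run of files qualifies when no single file is much larger than the rest combined. This is an assumed simplification, not the real ExploringCompactionPolicy code; the file sizes and the 1.2 ratio are illustrative values only.

// Minimal size-ratio compaction-selection sketch.
import java.util.List;

public class CompactionSelectionSketch {

    // True if every file is no larger than `ratio` times the combined size of the others.
    static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;                     // one file dominates the run; skip it
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the three B-family HFiles picked above: ~12.3 K + ~11.9 K + ~11.9 K.
        List<Long> storeFiles = List.of(12_595L, 12_186L, 12_186L);
        double ratio = 1.2;                        // assumed ratio for illustration
        if (withinRatio(storeFiles, ratio)) {
            System.out.println("compact " + storeFiles.size() + " files, total="
                + storeFiles.stream().mapToLong(Long::longValue).sum() + " bytes");
        }
    }
}
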
2024-11-20T15:23:45,401 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:45,401 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/B, priority=13, startTime=1732116225366; duration=0sec 2024-11-20T15:23:45,402 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:45,402 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:B 2024-11-20T15:23:45,402 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:45,402 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:45,402 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/C is initiating minor compaction (all files) 2024-11-20T15:23:45,403 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/C in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:45,403 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/25153a7be7824be7953e39337adb28a7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/146eae16457c46a89871dce8bfcb5a58, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/9d796ae710e148a69e58f91f63194903] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=36.1 K 2024-11-20T15:23:45,403 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 25153a7be7824be7953e39337adb28a7, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732116220725 2024-11-20T15:23:45,403 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 146eae16457c46a89871dce8bfcb5a58, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732116221364 2024-11-20T15:23:45,403 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d796ae710e148a69e58f91f63194903, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732116223497 2024-11-20T15:23:45,410 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
b9a29e8e077d15da0686c3415c44e050#C#compaction#599 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:45,410 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/6365ea60f74d4c7ea01224a97151d97f is 50, key is test_row_0/C:col10/1732116224110/Put/seqid=0 2024-11-20T15:23:45,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742522_1698 (size=12731) 2024-11-20T15:23:45,449 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:45,450 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-20T15:23:45,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:45,450 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T15:23:45,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:45,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:45,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:45,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:45,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:45,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:45,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bf54f799058c4f30920713a846057601_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116224120/Put/seqid=0 2024-11-20T15:23:45,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742523_1699 (size=12454) 
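
[Annotation] The flush that finally runs here writes its output under a ".tmp" directory first (see the ".tmp/A", ".tmp/B", mobdir/.tmp paths above) and only later makes the file visible via the "Committing ... as ..." / "Renaming flushed file" rename step. The sketch below shows that write-to-tmp-then-rename pattern in plain java.nio terms; it is an illustrative simplification on a local filesystem, not the HDFS-backed HBase code, and the paths/names are placeholders.

// Illustrative write-to-tmp-then-commit sketch: readers never see a half-written file.
import java.io.IOException;
import java.nio.file.*;

public class TmpThenCommitSketch {

    static Path flushToTmp(Path tmpDir, String name, byte[] data) throws IOException {
        Files.createDirectories(tmpDir);
        Path tmpFile = tmpDir.resolve(name);
        Files.write(tmpFile, data);                // the slow write happens under .tmp
        return tmpFile;
    }

    static Path commit(Path tmpFile, Path storeDir) throws IOException {
        Files.createDirectories(storeDir);
        Path target = storeDir.resolve(tmpFile.getFileName());
        // The "Committing .tmp/<file> as <store>/<file>" log lines correspond to this rename.
        return Files.move(tmpFile, target, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path region = Files.createTempDirectory("region-b9a29e8e");
        Path tmp = flushToTmp(region.resolve(".tmp/A"), "flushed-hfile",
            "fake hfile bytes".getBytes());
        Path committed = commit(tmp, region.resolve("A"));
        System.out.println("flushed and committed: " + committed);
    }
}
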
2024-11-20T15:23:45,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:45,464 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bf54f799058c4f30920713a846057601_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bf54f799058c4f30920713a846057601_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:45,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/04e6cc82d6c249fca690a8ae73fd8027, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:45,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/04e6cc82d6c249fca690a8ae73fd8027 is 175, key is test_row_0/A:col10/1732116224120/Put/seqid=0 2024-11-20T15:23:45,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742524_1700 (size=31255) 2024-11-20T15:23:45,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:45,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:45,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:45,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116285545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:45,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:45,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116285647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:45,783 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#A#compaction#598 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:45,784 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/755cf64a8840410ba05841a1b270aa64 is 175, key is test_row_0/A:col10/1732116224110/Put/seqid=0 2024-11-20T15:23:45,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742525_1701 (size=31685) 2024-11-20T15:23:45,818 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/6365ea60f74d4c7ea01224a97151d97f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6365ea60f74d4c7ea01224a97151d97f 2024-11-20T15:23:45,821 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/C of b9a29e8e077d15da0686c3415c44e050 into 6365ea60f74d4c7ea01224a97151d97f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:45,821 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:45,822 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/C, priority=13, startTime=1732116225366; duration=0sec 2024-11-20T15:23:45,822 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:45,822 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:C 2024-11-20T15:23:45,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116285850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:45,869 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=279, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/04e6cc82d6c249fca690a8ae73fd8027 2024-11-20T15:23:45,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/1f13bdcd5aef4ff4a94715e6f411db86 is 50, key is test_row_0/B:col10/1732116224120/Put/seqid=0 2024-11-20T15:23:45,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742526_1702 (size=12301) 2024-11-20T15:23:46,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:46,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116286153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:46,192 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/755cf64a8840410ba05841a1b270aa64 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/755cf64a8840410ba05841a1b270aa64 2024-11-20T15:23:46,196 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/A of b9a29e8e077d15da0686c3415c44e050 into 755cf64a8840410ba05841a1b270aa64(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:46,196 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:46,196 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/A, priority=13, startTime=1732116225366; duration=0sec 2024-11-20T15:23:46,196 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:46,196 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:A 2024-11-20T15:23:46,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:46,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116286273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:46,279 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/1f13bdcd5aef4ff4a94715e6f411db86 2024-11-20T15:23:46,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:46,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116286281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:46,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/6b2d8d3ce017473488ab3058539c3097 is 50, key is test_row_0/C:col10/1732116224120/Put/seqid=0 2024-11-20T15:23:46,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742527_1703 (size=12301) 2024-11-20T15:23:46,288 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/6b2d8d3ce017473488ab3058539c3097 2024-11-20T15:23:46,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:46,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116286290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:46,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:46,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/04e6cc82d6c249fca690a8ae73fd8027 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/04e6cc82d6c249fca690a8ae73fd8027 2024-11-20T15:23:46,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116286291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:46,295 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/04e6cc82d6c249fca690a8ae73fd8027, entries=150, sequenceid=279, filesize=30.5 K 2024-11-20T15:23:46,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/1f13bdcd5aef4ff4a94715e6f411db86 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/1f13bdcd5aef4ff4a94715e6f411db86 2024-11-20T15:23:46,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,299 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/1f13bdcd5aef4ff4a94715e6f411db86, entries=150, sequenceid=279, filesize=12.0 K 2024-11-20T15:23:46,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/6b2d8d3ce017473488ab3058539c3097 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6b2d8d3ce017473488ab3058539c3097 2024-11-20T15:23:46,303 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6b2d8d3ce017473488ab3058539c3097, entries=150, sequenceid=279, filesize=12.0 K 2024-11-20T15:23:46,304 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for b9a29e8e077d15da0686c3415c44e050 in 854ms, sequenceid=279, compaction requested=false 2024-11-20T15:23:46,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:46,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:46,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-11-20T15:23:46,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=182 2024-11-20T15:23:46,306 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-11-20T15:23:46,306 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0750 sec 2024-11-20T15:23:46,308 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees in 2.0790 sec 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T15:23:46,333 INFO [Thread-2754 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 181 completed 2024-11-20T15:23:46,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,334 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:46,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees 2024-11-20T15:23:46,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,336 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=183, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:46,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-20T15:23:46,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,337 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=183, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:46,337 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:46,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,337 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,340 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,343 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,347 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,350 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,354 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,357 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,360 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,363 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,366 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,369 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,373 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,377 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,380 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,384 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,387 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,390 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,393 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,398 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,401 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,405 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,409 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,412 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,415 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,418 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,422 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,426 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,429 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,433 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,437 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-20T15:23:46,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] 
2024-11-20T15:23:46,488 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954
2024-11-20T15:23:46,489 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184
2024-11-20T15:23:46,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.
2024-11-20T15:23:46,489 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-11-20T15:23:46,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A
2024-11-20T15:23:46,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:23:46,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B
2024-11-20T15:23:46,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:23:46,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C
2024-11-20T15:23:46,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T15:23:46,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120932701eb3c58409ea6ef8618881eb41d_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116225539/Put/seqid=0
2024-11-20T15:23:46,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742528_1704 (size=9914)
2024-11-20T15:23:46,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,503 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120932701eb3c58409ea6ef8618881eb41d_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120932701eb3c58409ea6ef8618881eb41d_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:46,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/d0651c53199f42b090045c168b77d943, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:46,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/d0651c53199f42b090045c168b77d943 is 175, key is test_row_0/A:col10/1732116225539/Put/seqid=0 2024-11-20T15:23:46,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742529_1705 (size=22561) 2024-11-20T15:23:46,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,509 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 
{event_type=RS_FLUSH_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=294, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/d0651c53199f42b090045c168b77d943 2024-11-20T15:23:46,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/16dcb0787fae4a85b8469aa1fded9545 is 50, key is test_row_0/B:col10/1732116225539/Put/seqid=0 2024-11-20T15:23:46,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742530_1706 (size=9857) 2024-11-20T15:23:46,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-20T15:23:46,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:46,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:46,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T15:23:46,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:46,748 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:46,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116286747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:46,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:46,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116286849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:46,920 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/16dcb0787fae4a85b8469aa1fded9545 2024-11-20T15:23:46,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/e1242db91e2642b5a4d7414711ac3875 is 50, key is test_row_0/C:col10/1732116225539/Put/seqid=0 2024-11-20T15:23:46,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742531_1707 (size=9857) 2024-11-20T15:23:46,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-20T15:23:47,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:47,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116287051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:47,329 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/e1242db91e2642b5a4d7414711ac3875 2024-11-20T15:23:47,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/d0651c53199f42b090045c168b77d943 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/d0651c53199f42b090045c168b77d943 2024-11-20T15:23:47,337 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/d0651c53199f42b090045c168b77d943, entries=100, sequenceid=294, filesize=22.0 K 2024-11-20T15:23:47,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/16dcb0787fae4a85b8469aa1fded9545 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/16dcb0787fae4a85b8469aa1fded9545 2024-11-20T15:23:47,340 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/16dcb0787fae4a85b8469aa1fded9545, entries=100, sequenceid=294, filesize=9.6 K 2024-11-20T15:23:47,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/e1242db91e2642b5a4d7414711ac3875 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/e1242db91e2642b5a4d7414711ac3875 2024-11-20T15:23:47,343 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/e1242db91e2642b5a4d7414711ac3875, entries=100, sequenceid=294, filesize=9.6 K 2024-11-20T15:23:47,344 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for b9a29e8e077d15da0686c3415c44e050 in 855ms, sequenceid=294, compaction requested=true 2024-11-20T15:23:47,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:47,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:47,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=184 2024-11-20T15:23:47,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=184 2024-11-20T15:23:47,346 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=183 2024-11-20T15:23:47,346 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=183, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0080 sec 2024-11-20T15:23:47,348 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees in 1.0130 sec 2024-11-20T15:23:47,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:47,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T15:23:47,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:47,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:47,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:47,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:47,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:47,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:47,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120994557d4fd9f4be0bae74a9c99801ba6_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116227356/Put/seqid=0 2024-11-20T15:23:47,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:47,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116287381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:47,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742532_1708 (size=14994) 2024-11-20T15:23:47,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-20T15:23:47,440 INFO [Thread-2754 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-11-20T15:23:47,441 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:47,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees 2024-11-20T15:23:47,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is 
done pid=185 2024-11-20T15:23:47,442 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=185, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:47,443 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=185, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:47,443 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:47,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:47,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116287484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:47,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-20T15:23:47,594 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:47,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-11-20T15:23:47,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:47,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:47,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:47,595 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:47,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:47,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:47,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:47,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116287686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:47,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-20T15:23:47,747 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:47,747 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-11-20T15:23:47,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:47,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:47,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:47,748 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:47,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:47,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:47,788 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:47,791 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120994557d4fd9f4be0bae74a9c99801ba6_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120994557d4fd9f4be0bae74a9c99801ba6_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:47,792 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/62db193c898f45238a93dff9eed73a72, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:47,792 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/62db193c898f45238a93dff9eed73a72 is 175, key is test_row_0/A:col10/1732116227356/Put/seqid=0 2024-11-20T15:23:47,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742533_1709 (size=39949) 2024-11-20T15:23:47,900 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:47,900 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-11-20T15:23:47,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:47,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:47,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:47,901 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:47,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:47,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:47,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:47,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116287989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:48,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-20T15:23:48,052 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:48,053 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-11-20T15:23:48,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:48,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:48,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:48,053 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:48,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:48,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:48,197 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=318, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/62db193c898f45238a93dff9eed73a72 2024-11-20T15:23:48,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/b38871d1e0f24c5c91516b79f58f0ff8 is 50, key is test_row_0/B:col10/1732116227356/Put/seqid=0 2024-11-20T15:23:48,205 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:48,205 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-11-20T15:23:48,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:48,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:48,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:48,206 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:48,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:48,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:48,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742534_1710 (size=12301) 2024-11-20T15:23:48,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:48,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116288292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:48,294 DEBUG [Thread-2744 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4136 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., hostname=0b62285ead89,33387,1732116069954, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:23:48,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:48,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116288294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:48,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:48,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116288294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:48,296 DEBUG [Thread-2748 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4170 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., hostname=0b62285ead89,33387,1732116069954, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:23:48,296 DEBUG [Thread-2746 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., hostname=0b62285ead89,33387,1732116069954, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:23:48,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:48,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116288311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:48,313 DEBUG [Thread-2752 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4155 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., hostname=0b62285ead89,33387,1732116069954, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:23:48,358 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:48,358 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-11-20T15:23:48,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:48,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:48,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:48,358 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:48,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:48,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:48,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:48,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116288495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:48,510 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:48,510 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-11-20T15:23:48,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:48,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:48,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:48,511 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:48,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:48,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:48,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-20T15:23:48,619 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/b38871d1e0f24c5c91516b79f58f0ff8 2024-11-20T15:23:48,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/9fff94855230425a8a9e7e33c47d8fc1 is 50, key is test_row_0/C:col10/1732116227356/Put/seqid=0 2024-11-20T15:23:48,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742535_1711 (size=12301) 2024-11-20T15:23:48,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/9fff94855230425a8a9e7e33c47d8fc1 2024-11-20T15:23:48,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/62db193c898f45238a93dff9eed73a72 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/62db193c898f45238a93dff9eed73a72 2024-11-20T15:23:48,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/62db193c898f45238a93dff9eed73a72, entries=200, sequenceid=318, filesize=39.0 K 2024-11-20T15:23:48,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/b38871d1e0f24c5c91516b79f58f0ff8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/b38871d1e0f24c5c91516b79f58f0ff8 2024-11-20T15:23:48,649 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/b38871d1e0f24c5c91516b79f58f0ff8, entries=150, sequenceid=318, filesize=12.0 K 2024-11-20T15:23:48,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/9fff94855230425a8a9e7e33c47d8fc1 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/9fff94855230425a8a9e7e33c47d8fc1 2024-11-20T15:23:48,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/9fff94855230425a8a9e7e33c47d8fc1, entries=150, sequenceid=318, filesize=12.0 K 2024-11-20T15:23:48,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for b9a29e8e077d15da0686c3415c44e050 in 1296ms, sequenceid=318, compaction requested=true 2024-11-20T15:23:48,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:48,654 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:48,654 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:48,654 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:23:48,654 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:23:48,654 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:48,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:48,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:48,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:48,655 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47190 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:23:48,655 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 125450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:23:48,655 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/B is initiating minor compaction (all files) 2024-11-20T15:23:48,655 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/A is initiating minor compaction (all files) 2024-11-20T15:23:48,656 INFO 
[RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/A in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:48,656 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/B in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:48,656 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/755cf64a8840410ba05841a1b270aa64, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/04e6cc82d6c249fca690a8ae73fd8027, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/d0651c53199f42b090045c168b77d943, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/62db193c898f45238a93dff9eed73a72] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=122.5 K 2024-11-20T15:23:48,656 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/b9c95357b09a424eb1e4f0d45b9dbf53, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/1f13bdcd5aef4ff4a94715e6f411db86, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/16dcb0787fae4a85b8469aa1fded9545, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/b38871d1e0f24c5c91516b79f58f0ff8] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=46.1 K 2024-11-20T15:23:48,656 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:48,656 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/755cf64a8840410ba05841a1b270aa64, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/04e6cc82d6c249fca690a8ae73fd8027, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/d0651c53199f42b090045c168b77d943, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/62db193c898f45238a93dff9eed73a72] 2024-11-20T15:23:48,656 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b9c95357b09a424eb1e4f0d45b9dbf53, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732116223497 2024-11-20T15:23:48,656 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 755cf64a8840410ba05841a1b270aa64, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732116223497 2024-11-20T15:23:48,656 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04e6cc82d6c249fca690a8ae73fd8027, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732116224120 2024-11-20T15:23:48,656 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f13bdcd5aef4ff4a94715e6f411db86, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732116224120 2024-11-20T15:23:48,657 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0651c53199f42b090045c168b77d943, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732116225539 2024-11-20T15:23:48,657 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 16dcb0787fae4a85b8469aa1fded9545, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732116225539 2024-11-20T15:23:48,657 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting b38871d1e0f24c5c91516b79f58f0ff8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732116226699 2024-11-20T15:23:48,657 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62db193c898f45238a93dff9eed73a72, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732116226699 2024-11-20T15:23:48,663 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:48,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-11-20T15:23:48,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:48,663 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T15:23:48,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:48,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:48,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:48,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:48,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:48,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:48,665 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:48,665 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#B#compaction#609 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:48,666 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/c3d569173da242fe8a86533a5b82bf87 is 50, key is test_row_0/B:col10/1732116227356/Put/seqid=0 2024-11-20T15:23:48,673 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411203321942c19584315bbe43c069db9a052_b9a29e8e077d15da0686c3415c44e050 store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:48,675 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411203321942c19584315bbe43c069db9a052_b9a29e8e077d15da0686c3415c44e050, store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:48,676 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203321942c19584315bbe43c069db9a052_b9a29e8e077d15da0686c3415c44e050 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:48,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742536_1712 (size=13017) 2024-11-20T15:23:48,681 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/c3d569173da242fe8a86533a5b82bf87 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/c3d569173da242fe8a86533a5b82bf87 2024-11-20T15:23:48,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dfa4ed779ac64fec9709d3abb511185f_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116227368/Put/seqid=0 2024-11-20T15:23:48,687 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/B of b9a29e8e077d15da0686c3415c44e050 into c3d569173da242fe8a86533a5b82bf87(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:48,687 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:48,687 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/B, priority=12, startTime=1732116228654; duration=0sec 2024-11-20T15:23:48,688 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:48,688 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:B 2024-11-20T15:23:48,688 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T15:23:48,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742537_1713 (size=4469) 2024-11-20T15:23:48,689 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47190 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T15:23:48,690 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/C is initiating minor compaction (all files) 2024-11-20T15:23:48,690 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/C in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:48,690 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6365ea60f74d4c7ea01224a97151d97f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6b2d8d3ce017473488ab3058539c3097, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/e1242db91e2642b5a4d7414711ac3875, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/9fff94855230425a8a9e7e33c47d8fc1] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=46.1 K 2024-11-20T15:23:48,690 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6365ea60f74d4c7ea01224a97151d97f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732116223497 2024-11-20T15:23:48,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742538_1714 (size=12454) 2024-11-20T15:23:48,690 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b2d8d3ce017473488ab3058539c3097, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732116224120 2024-11-20T15:23:48,691 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting e1242db91e2642b5a4d7414711ac3875, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732116225539 2024-11-20T15:23:48,691 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 9fff94855230425a8a9e7e33c47d8fc1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732116226699 2024-11-20T15:23:48,699 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#C#compaction#612 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:48,699 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/cdf8ea84c6704165b0eb81b9e4a93911 is 50, key is test_row_0/C:col10/1732116227356/Put/seqid=0 2024-11-20T15:23:48,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742539_1715 (size=13017) 2024-11-20T15:23:48,707 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/cdf8ea84c6704165b0eb81b9e4a93911 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/cdf8ea84c6704165b0eb81b9e4a93911 2024-11-20T15:23:48,713 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/C of b9a29e8e077d15da0686c3415c44e050 into cdf8ea84c6704165b0eb81b9e4a93911(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:48,713 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:48,713 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/C, priority=12, startTime=1732116228655; duration=0sec 2024-11-20T15:23:48,714 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:48,714 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:C 2024-11-20T15:23:49,090 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#A#compaction#610 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:49,090 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/9e6979c9c066401a9c27612054d6ecd8 is 175, key is test_row_0/A:col10/1732116227356/Put/seqid=0 2024-11-20T15:23:49,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:49,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742540_1716 (size=31971) 2024-11-20T15:23:49,095 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dfa4ed779ac64fec9709d3abb511185f_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dfa4ed779ac64fec9709d3abb511185f_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:49,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/ff29c2b86ad54fda95e60f87e4e6904c, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:49,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/ff29c2b86ad54fda95e60f87e4e6904c is 175, key is test_row_0/A:col10/1732116227368/Put/seqid=0 2024-11-20T15:23:49,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742541_1717 (size=31255) 2024-11-20T15:23:49,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
as already flushing 2024-11-20T15:23:49,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:49,499 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/9e6979c9c066401a9c27612054d6ecd8 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/9e6979c9c066401a9c27612054d6ecd8 2024-11-20T15:23:49,500 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=330, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/ff29c2b86ad54fda95e60f87e4e6904c 2024-11-20T15:23:49,503 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/A of b9a29e8e077d15da0686c3415c44e050 into 9e6979c9c066401a9c27612054d6ecd8(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:49,503 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:49,503 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/A, priority=12, startTime=1732116228654; duration=0sec 2024-11-20T15:23:49,504 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:49,504 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:A 2024-11-20T15:23:49,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/ccdedebcf5b74e75b184ab309c5ceeb0 is 50, key is test_row_0/B:col10/1732116227368/Put/seqid=0 2024-11-20T15:23:49,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742542_1718 (size=12301) 2024-11-20T15:23:49,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-20T15:23:49,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:49,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116289549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:49,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:49,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116289652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:49,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:49,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116289854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:49,910 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/ccdedebcf5b74e75b184ab309c5ceeb0 2024-11-20T15:23:49,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/697187f1f244468b9648b7c6cb9fd41f is 50, key is test_row_0/C:col10/1732116227368/Put/seqid=0 2024-11-20T15:23:49,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742543_1719 (size=12301) 2024-11-20T15:23:50,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:50,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116290157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:50,319 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/697187f1f244468b9648b7c6cb9fd41f 2024-11-20T15:23:50,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/ff29c2b86ad54fda95e60f87e4e6904c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ff29c2b86ad54fda95e60f87e4e6904c 2024-11-20T15:23:50,326 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ff29c2b86ad54fda95e60f87e4e6904c, entries=150, sequenceid=330, filesize=30.5 K 2024-11-20T15:23:50,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/ccdedebcf5b74e75b184ab309c5ceeb0 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ccdedebcf5b74e75b184ab309c5ceeb0 2024-11-20T15:23:50,330 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ccdedebcf5b74e75b184ab309c5ceeb0, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T15:23:50,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/697187f1f244468b9648b7c6cb9fd41f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/697187f1f244468b9648b7c6cb9fd41f 2024-11-20T15:23:50,334 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/697187f1f244468b9648b7c6cb9fd41f, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T15:23:50,335 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b9a29e8e077d15da0686c3415c44e050 in 1672ms, sequenceid=330, compaction requested=false 2024-11-20T15:23:50,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:50,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:50,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-11-20T15:23:50,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=186 2024-11-20T15:23:50,337 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=186, resume processing ppid=185 2024-11-20T15:23:50,337 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8930 sec 2024-11-20T15:23:50,338 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees in 2.8960 sec 2024-11-20T15:23:50,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:50,666 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T15:23:50,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:50,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:50,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:50,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:50,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:50,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:50,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205676e0bbb9ff4f0ca0ed860779a9cb2c_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116229545/Put/seqid=0 2024-11-20T15:23:50,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742544_1720 (size=14994) 2024-11-20T15:23:50,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:50,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116290685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:50,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:50,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116290788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:50,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:50,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116290991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:51,080 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:51,083 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205676e0bbb9ff4f0ca0ed860779a9cb2c_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205676e0bbb9ff4f0ca0ed860779a9cb2c_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:51,083 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/cf630ba61d094ceea32f9897dcea2b81, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:51,084 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/cf630ba61d094ceea32f9897dcea2b81 is 175, key is test_row_0/A:col10/1732116229545/Put/seqid=0 2024-11-20T15:23:51,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742545_1721 (size=39949) 2024-11-20T15:23:51,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:51,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116291293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:51,488 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=358, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/cf630ba61d094ceea32f9897dcea2b81 2024-11-20T15:23:51,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/ccba6a2652094e50988d20f429fa844a is 50, key is test_row_0/B:col10/1732116229545/Put/seqid=0 2024-11-20T15:23:51,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742546_1722 (size=12301) 2024-11-20T15:23:51,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-20T15:23:51,546 INFO [Thread-2754 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 185 completed 2024-11-20T15:23:51,548 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:51,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=187, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees 2024-11-20T15:23:51,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-20T15:23:51,549 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=187, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:51,550 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=187, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:51,550 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized 
subprocedures=[{pid=188, ppid=187, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:51,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-20T15:23:51,701 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:51,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-20T15:23:51,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:51,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:51,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:51,702 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:51,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:51,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:51,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:51,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116291797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:51,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-20T15:23:51,854 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:51,854 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-20T15:23:51,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:51,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:51,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:51,854 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:51,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:51,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:51,898 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/ccba6a2652094e50988d20f429fa844a 2024-11-20T15:23:51,905 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/036aad7f56e04523b518685f03667c4b is 50, key is test_row_0/C:col10/1732116229545/Put/seqid=0 2024-11-20T15:23:51,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742547_1723 (size=12301) 2024-11-20T15:23:51,909 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/036aad7f56e04523b518685f03667c4b 2024-11-20T15:23:51,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/cf630ba61d094ceea32f9897dcea2b81 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/cf630ba61d094ceea32f9897dcea2b81 2024-11-20T15:23:51,915 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/cf630ba61d094ceea32f9897dcea2b81, entries=200, sequenceid=358, filesize=39.0 K 2024-11-20T15:23:51,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/ccba6a2652094e50988d20f429fa844a as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ccba6a2652094e50988d20f429fa844a 2024-11-20T15:23:51,919 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ccba6a2652094e50988d20f429fa844a, entries=150, sequenceid=358, filesize=12.0 K 
2024-11-20T15:23:51,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/036aad7f56e04523b518685f03667c4b as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/036aad7f56e04523b518685f03667c4b 2024-11-20T15:23:51,923 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/036aad7f56e04523b518685f03667c4b, entries=150, sequenceid=358, filesize=12.0 K 2024-11-20T15:23:51,923 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b9a29e8e077d15da0686c3415c44e050 in 1257ms, sequenceid=358, compaction requested=true 2024-11-20T15:23:51,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:51,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:51,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:51,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:51,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:51,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:51,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:51,924 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:51,924 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:51,924 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:51,924 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103175 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:51,925 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): 
b9a29e8e077d15da0686c3415c44e050/B is initiating minor compaction (all files) 2024-11-20T15:23:51,925 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/A is initiating minor compaction (all files) 2024-11-20T15:23:51,925 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/B in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:51,925 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/A in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:51,925 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/9e6979c9c066401a9c27612054d6ecd8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ff29c2b86ad54fda95e60f87e4e6904c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/cf630ba61d094ceea32f9897dcea2b81] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=100.8 K 2024-11-20T15:23:51,925 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/c3d569173da242fe8a86533a5b82bf87, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ccdedebcf5b74e75b184ab309c5ceeb0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ccba6a2652094e50988d20f429fa844a] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=36.7 K 2024-11-20T15:23:51,925 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:51,925 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/9e6979c9c066401a9c27612054d6ecd8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ff29c2b86ad54fda95e60f87e4e6904c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/cf630ba61d094ceea32f9897dcea2b81] 2024-11-20T15:23:51,925 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting c3d569173da242fe8a86533a5b82bf87, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732116226699 2024-11-20T15:23:51,925 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e6979c9c066401a9c27612054d6ecd8, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732116226699 2024-11-20T15:23:51,925 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting ccdedebcf5b74e75b184ab309c5ceeb0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732116227368 2024-11-20T15:23:51,926 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff29c2b86ad54fda95e60f87e4e6904c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732116227368 2024-11-20T15:23:51,926 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting ccba6a2652094e50988d20f429fa844a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732116229543 2024-11-20T15:23:51,926 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf630ba61d094ceea32f9897dcea2b81, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732116229543 2024-11-20T15:23:51,930 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:51,932 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#B#compaction#619 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:51,932 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/11e9ab7977f54349a303875027293d33 is 50, key is test_row_0/B:col10/1732116229545/Put/seqid=0 2024-11-20T15:23:51,934 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120f7bae32a060b4947a47d9b4eaea67dde_b9a29e8e077d15da0686c3415c44e050 store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:51,935 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120f7bae32a060b4947a47d9b4eaea67dde_b9a29e8e077d15da0686c3415c44e050, store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:51,935 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f7bae32a060b4947a47d9b4eaea67dde_b9a29e8e077d15da0686c3415c44e050 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:51,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742548_1724 (size=13119) 2024-11-20T15:23:51,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742549_1725 (size=4469) 2024-11-20T15:23:51,941 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#A#compaction#618 average throughput is 2.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:51,942 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/7fe123a90bb540f28eb12dc842616cbb is 175, key is test_row_0/A:col10/1732116229545/Put/seqid=0 2024-11-20T15:23:51,944 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/11e9ab7977f54349a303875027293d33 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/11e9ab7977f54349a303875027293d33 2024-11-20T15:23:51,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742550_1726 (size=32073) 2024-11-20T15:23:51,949 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/B of b9a29e8e077d15da0686c3415c44e050 into 11e9ab7977f54349a303875027293d33(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:51,949 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:51,949 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/B, priority=13, startTime=1732116231924; duration=0sec 2024-11-20T15:23:51,949 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:51,949 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:B 2024-11-20T15:23:51,949 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:51,949 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/7fe123a90bb540f28eb12dc842616cbb as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/7fe123a90bb540f28eb12dc842616cbb 2024-11-20T15:23:51,950 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:51,950 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/C is initiating minor compaction (all files) 2024-11-20T15:23:51,950 INFO 
[RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/C in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:51,950 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/cdf8ea84c6704165b0eb81b9e4a93911, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/697187f1f244468b9648b7c6cb9fd41f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/036aad7f56e04523b518685f03667c4b] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=36.7 K 2024-11-20T15:23:51,951 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting cdf8ea84c6704165b0eb81b9e4a93911, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732116226699 2024-11-20T15:23:51,951 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 697187f1f244468b9648b7c6cb9fd41f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732116227368 2024-11-20T15:23:51,951 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 036aad7f56e04523b518685f03667c4b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732116229543 2024-11-20T15:23:51,953 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/A of b9a29e8e077d15da0686c3415c44e050 into 7fe123a90bb540f28eb12dc842616cbb(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:51,954 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:51,954 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/A, priority=13, startTime=1732116231924; duration=0sec 2024-11-20T15:23:51,954 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:51,954 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:A 2024-11-20T15:23:51,958 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#C#compaction#620 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:51,959 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/d811169b9246401b9b7ed2b18c8de703 is 50, key is test_row_0/C:col10/1732116229545/Put/seqid=0 2024-11-20T15:23:51,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742551_1727 (size=13119) 2024-11-20T15:23:51,966 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/d811169b9246401b9b7ed2b18c8de703 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d811169b9246401b9b7ed2b18c8de703 2024-11-20T15:23:51,969 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/C of b9a29e8e077d15da0686c3415c44e050 into d811169b9246401b9b7ed2b18c8de703(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:51,969 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:51,969 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/C, priority=13, startTime=1732116231924; duration=0sec 2024-11-20T15:23:51,969 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:51,969 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:C 2024-11-20T15:23:52,006 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:52,007 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-20T15:23:52,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:52,007 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T15:23:52,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:52,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:52,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:52,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:52,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:52,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:52,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120574e468cf3344e1392c32fd1da40cc81_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116230685/Put/seqid=0 2024-11-20T15:23:52,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742552_1728 (size=12454) 2024-11-20T15:23:52,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-20T15:23:52,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:52,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:52,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:52,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116292332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:52,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:52,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116292334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:52,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:52,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:52,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35590 deadline: 1732116292335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:52,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116292335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:52,336 DEBUG [Thread-2752 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8178 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., hostname=0b62285ead89,33387,1732116069954, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:23:52,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:52,421 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120574e468cf3344e1392c32fd1da40cc81_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120574e468cf3344e1392c32fd1da40cc81_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:52,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/1119739569c24c8e9929ac8b93a9e495, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:52,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/1119739569c24c8e9929ac8b93a9e495 is 175, key is test_row_0/A:col10/1732116230685/Put/seqid=0 2024-11-20T15:23:52,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742553_1729 (size=31255) 2024-11-20T15:23:52,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:52,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116292435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:52,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:52,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116292437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:52,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:52,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116292437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:52,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:52,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116292638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:52,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:52,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116292639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:52,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:52,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116292640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:52,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-20T15:23:52,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116292808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:52,827 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=372, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/1119739569c24c8e9929ac8b93a9e495 2024-11-20T15:23:52,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/6e0e6908679a4c8f83a4e0bbb05cabad is 50, key is test_row_0/B:col10/1732116230685/Put/seqid=0 2024-11-20T15:23:52,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742554_1730 (size=12301) 2024-11-20T15:23:52,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:52,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116292941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:52,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:52,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116292943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:52,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:52,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116292944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:53,238 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/6e0e6908679a4c8f83a4e0bbb05cabad 2024-11-20T15:23:53,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/a4a0252644c842ab8688fa4dfca3945d is 50, key is test_row_0/C:col10/1732116230685/Put/seqid=0 2024-11-20T15:23:53,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742555_1731 (size=12301) 2024-11-20T15:23:53,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:53,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116293445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:53,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:53,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116293445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:53,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:53,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116293447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:53,649 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/a4a0252644c842ab8688fa4dfca3945d 2024-11-20T15:23:53,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-20T15:23:53,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/1119739569c24c8e9929ac8b93a9e495 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/1119739569c24c8e9929ac8b93a9e495 2024-11-20T15:23:53,656 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/1119739569c24c8e9929ac8b93a9e495, entries=150, sequenceid=372, filesize=30.5 K 2024-11-20T15:23:53,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/6e0e6908679a4c8f83a4e0bbb05cabad as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/6e0e6908679a4c8f83a4e0bbb05cabad 2024-11-20T15:23:53,659 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/6e0e6908679a4c8f83a4e0bbb05cabad, entries=150, sequenceid=372, filesize=12.0 K 2024-11-20T15:23:53,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/a4a0252644c842ab8688fa4dfca3945d as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/a4a0252644c842ab8688fa4dfca3945d 2024-11-20T15:23:53,663 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/a4a0252644c842ab8688fa4dfca3945d, entries=150, sequenceid=372, filesize=12.0 K 2024-11-20T15:23:53,663 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for b9a29e8e077d15da0686c3415c44e050 in 1656ms, sequenceid=372, compaction requested=false 2024-11-20T15:23:53,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:53,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:53,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=188 2024-11-20T15:23:53,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=188 2024-11-20T15:23:53,666 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=188, resume processing ppid=187 2024-11-20T15:23:53,666 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, ppid=187, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1140 sec 2024-11-20T15:23:53,667 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees in 2.1180 sec 2024-11-20T15:23:54,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T15:23:54,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:54,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:54,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:54,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:54,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:54,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:54,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:54,470 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112059b79eb545dd4ca9935ac82a19b276e9_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116234456/Put/seqid=0 2024-11-20T15:23:54,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:54,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116294469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:54,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:54,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116294469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:54,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:54,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116294470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:54,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742556_1732 (size=12454) 2024-11-20T15:23:54,490 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:54,497 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112059b79eb545dd4ca9935ac82a19b276e9_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112059b79eb545dd4ca9935ac82a19b276e9_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:54,497 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/052ea90c32fb4997810700e8a9cd0df4, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:54,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/052ea90c32fb4997810700e8a9cd0df4 is 175, key is test_row_0/A:col10/1732116234456/Put/seqid=0 2024-11-20T15:23:54,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742557_1733 (size=31255) 2024-11-20T15:23:54,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:54,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116294574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:54,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:54,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116294574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:54,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:54,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116294574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:54,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:54,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116294776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:54,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:54,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116294777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:54,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:54,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116294778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:54,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:54,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35548 deadline: 1732116294815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:54,818 DEBUG [Thread-2750 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4133 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., hostname=0b62285ead89,33387,1732116069954, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor41.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T15:23:54,924 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=400, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/052ea90c32fb4997810700e8a9cd0df4 2024-11-20T15:23:54,941 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/83bae3ae3c3f47a9ad9a8bed9c79f88f is 50, key is test_row_0/B:col10/1732116234456/Put/seqid=0 2024-11-20T15:23:54,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742558_1734 (size=12301) 2024-11-20T15:23:55,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:55,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116295079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:55,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:55,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116295083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:55,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:55,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116295084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:55,374 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/83bae3ae3c3f47a9ad9a8bed9c79f88f 2024-11-20T15:23:55,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/6b33d4a9c25c46e4a1e3b64f4c5cbee5 is 50, key is test_row_0/C:col10/1732116234456/Put/seqid=0 2024-11-20T15:23:55,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742559_1735 (size=12301) 2024-11-20T15:23:55,423 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/6b33d4a9c25c46e4a1e3b64f4c5cbee5 2024-11-20T15:23:55,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/052ea90c32fb4997810700e8a9cd0df4 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/052ea90c32fb4997810700e8a9cd0df4 2024-11-20T15:23:55,435 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/052ea90c32fb4997810700e8a9cd0df4, entries=150, sequenceid=400, filesize=30.5 K 2024-11-20T15:23:55,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/83bae3ae3c3f47a9ad9a8bed9c79f88f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/83bae3ae3c3f47a9ad9a8bed9c79f88f 2024-11-20T15:23:55,440 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/83bae3ae3c3f47a9ad9a8bed9c79f88f, entries=150, sequenceid=400, filesize=12.0 K 2024-11-20T15:23:55,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/6b33d4a9c25c46e4a1e3b64f4c5cbee5 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6b33d4a9c25c46e4a1e3b64f4c5cbee5 2024-11-20T15:23:55,448 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6b33d4a9c25c46e4a1e3b64f4c5cbee5, entries=150, sequenceid=400, filesize=12.0 K 2024-11-20T15:23:55,449 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for b9a29e8e077d15da0686c3415c44e050 in 991ms, sequenceid=400, compaction requested=true 2024-11-20T15:23:55,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:55,449 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:55,450 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94583 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:55,450 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/A is initiating minor compaction (all files) 2024-11-20T15:23:55,450 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/A in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:55,450 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/7fe123a90bb540f28eb12dc842616cbb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/1119739569c24c8e9929ac8b93a9e495, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/052ea90c32fb4997810700e8a9cd0df4] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=92.4 K 2024-11-20T15:23:55,450 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:55,450 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. files: [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/7fe123a90bb540f28eb12dc842616cbb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/1119739569c24c8e9929ac8b93a9e495, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/052ea90c32fb4997810700e8a9cd0df4] 2024-11-20T15:23:55,451 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fe123a90bb540f28eb12dc842616cbb, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732116229543 2024-11-20T15:23:55,451 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1119739569c24c8e9929ac8b93a9e495, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732116230678 2024-11-20T15:23:55,452 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] compactions.Compactor(224): Compacting 052ea90c32fb4997810700e8a9cd0df4, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1732116232330 2024-11-20T15:23:55,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T15:23:55,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:55,455 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:55,456 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:55,457 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/B is initiating minor compaction (all files) 2024-11-20T15:23:55,457 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/B in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:55,457 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/11e9ab7977f54349a303875027293d33, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/6e0e6908679a4c8f83a4e0bbb05cabad, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/83bae3ae3c3f47a9ad9a8bed9c79f88f] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=36.8 K 2024-11-20T15:23:55,457 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 11e9ab7977f54349a303875027293d33, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732116229543 2024-11-20T15:23:55,458 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e0e6908679a4c8f83a4e0bbb05cabad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732116230678 2024-11-20T15:23:55,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T15:23:55,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:55,458 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 83bae3ae3c3f47a9ad9a8bed9c79f88f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1732116232330 2024-11-20T15:23:55,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b9a29e8e077d15da0686c3415c44e050:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T15:23:55,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:55,459 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:55,463 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer 
created=d41d8cd98f00b204e9800998ecf8427e20241120193ddf244bb249ad94e39bea79f56df7_b9a29e8e077d15da0686c3415c44e050 store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:55,465 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120193ddf244bb249ad94e39bea79f56df7_b9a29e8e077d15da0686c3415c44e050, store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:55,466 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120193ddf244bb249ad94e39bea79f56df7_b9a29e8e077d15da0686c3415c44e050 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:55,478 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#B#compaction#628 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:55,478 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/4d726ba99f4247aea6ca0e90dba7318c is 50, key is test_row_0/B:col10/1732116234456/Put/seqid=0 2024-11-20T15:23:55,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742560_1736 (size=4469) 2024-11-20T15:23:55,502 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#A#compaction#627 average throughput is 0.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:55,503 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/7953937bb5e74e4c973f4b72a6b60656 is 175, key is test_row_0/A:col10/1732116234456/Put/seqid=0 2024-11-20T15:23:55,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742561_1737 (size=13221) 2024-11-20T15:23:55,520 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/4d726ba99f4247aea6ca0e90dba7318c as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/4d726ba99f4247aea6ca0e90dba7318c 2024-11-20T15:23:55,527 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/B of b9a29e8e077d15da0686c3415c44e050 into 4d726ba99f4247aea6ca0e90dba7318c(size=12.9 K), total size for store is 12.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T15:23:55,527 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:55,527 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/B, priority=13, startTime=1732116235455; duration=0sec 2024-11-20T15:23:55,527 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T15:23:55,527 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:B 2024-11-20T15:23:55,527 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T15:23:55,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742562_1738 (size=32175) 2024-11-20T15:23:55,531 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T15:23:55,531 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1540): b9a29e8e077d15da0686c3415c44e050/C is initiating minor compaction (all files) 2024-11-20T15:23:55,531 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b9a29e8e077d15da0686c3415c44e050/C in TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
2024-11-20T15:23:55,531 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d811169b9246401b9b7ed2b18c8de703, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/a4a0252644c842ab8688fa4dfca3945d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6b33d4a9c25c46e4a1e3b64f4c5cbee5] into tmpdir=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp, totalSize=36.8 K 2024-11-20T15:23:55,531 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting d811169b9246401b9b7ed2b18c8de703, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732116229543 2024-11-20T15:23:55,532 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting a4a0252644c842ab8688fa4dfca3945d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732116230678 2024-11-20T15:23:55,532 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b33d4a9c25c46e4a1e3b64f4c5cbee5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1732116232330 2024-11-20T15:23:55,538 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/7953937bb5e74e4c973f4b72a6b60656 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/7953937bb5e74e4c973f4b72a6b60656 2024-11-20T15:23:55,547 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/A of b9a29e8e077d15da0686c3415c44e050 into 7953937bb5e74e4c973f4b72a6b60656(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:55,547 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:55,547 INFO [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/A, priority=13, startTime=1732116235449; duration=0sec 2024-11-20T15:23:55,547 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:55,547 DEBUG [RS:0;0b62285ead89:33387-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:A 2024-11-20T15:23:55,550 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b9a29e8e077d15da0686c3415c44e050#C#compaction#629 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T15:23:55,551 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/db549c3fa58a4874a8f05c6b2a11cc6f is 50, key is test_row_0/C:col10/1732116234456/Put/seqid=0 2024-11-20T15:23:55,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742563_1739 (size=13221) 2024-11-20T15:23:55,583 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/db549c3fa58a4874a8f05c6b2a11cc6f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/db549c3fa58a4874a8f05c6b2a11cc6f 2024-11-20T15:23:55,591 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b9a29e8e077d15da0686c3415c44e050/C of b9a29e8e077d15da0686c3415c44e050 into db549c3fa58a4874a8f05c6b2a11cc6f(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T15:23:55,591 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:55,591 INFO [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050., storeName=b9a29e8e077d15da0686c3415c44e050/C, priority=13, startTime=1732116235458; duration=0sec 2024-11-20T15:23:55,591 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T15:23:55,591 DEBUG [RS:0;0b62285ead89:33387-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b9a29e8e077d15da0686c3415c44e050:C 2024-11-20T15:23:55,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(8581): Flush requested on b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:55,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T15:23:55,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:55,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:55,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:55,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:55,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:55,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:55,608 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120aabe844f5f6b4c2a8e1036ee7c3fc997_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116235591/Put/seqid=0 2024-11-20T15:23:55,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742564_1740 (size=12454) 2024-11-20T15:23:55,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:55,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116295632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:55,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:55,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116295633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:55,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:55,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116295634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:55,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-20T15:23:55,654 INFO [Thread-2754 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 187 completed 2024-11-20T15:23:55,655 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T15:23:55,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=189, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees 2024-11-20T15:23:55,657 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=189, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T15:23:55,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-11-20T15:23:55,659 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=189, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T15:23:55,659 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T15:23:55,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:55,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116295737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:55,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:55,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116295737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:55,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:55,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116295737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:55,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-11-20T15:23:55,811 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:55,812 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-11-20T15:23:55,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:55,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:55,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:55,812 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:55,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:55,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=190 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:55,857 DEBUG [Thread-2757 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f1754bc to 127.0.0.1:62338 2024-11-20T15:23:55,857 DEBUG [Thread-2757 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:55,858 DEBUG [Thread-2761 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bb75907 to 127.0.0.1:62338 2024-11-20T15:23:55,858 DEBUG [Thread-2761 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:55,858 DEBUG [Thread-2759 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d9113f3 to 127.0.0.1:62338 2024-11-20T15:23:55,858 DEBUG [Thread-2759 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:55,860 DEBUG [Thread-2763 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c1d3a95 to 127.0.0.1:62338 2024-11-20T15:23:55,860 DEBUG [Thread-2763 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:55,861 DEBUG [Thread-2755 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7846cb78 to 127.0.0.1:62338 2024-11-20T15:23:55,861 DEBUG [Thread-2755 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:55,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:55,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:55,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116295940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:55,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116295941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:55,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:55,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116295941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:55,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-11-20T15:23:55,965 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:55,965 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-11-20T15:23:55,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:55,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:55,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:55,966 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:55,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:55,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=190 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:56,021 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:56,023 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120aabe844f5f6b4c2a8e1036ee7c3fc997_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aabe844f5f6b4c2a8e1036ee7c3fc997_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:56,024 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/f3472c3c7dae45c49c4b1eaf3accfe17, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:56,025 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/f3472c3c7dae45c49c4b1eaf3accfe17 is 175, key is test_row_0/A:col10/1732116235591/Put/seqid=0 2024-11-20T15:23:56,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742565_1741 (size=31255) 2024-11-20T15:23:56,117 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:56,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-11-20T15:23:56,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:56,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:56,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:56,118 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:56,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:56,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=190 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:56,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:56,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:56,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:56,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116296244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:56,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116296244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:56,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116296244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:56,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-11-20T15:23:56,270 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:56,271 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-11-20T15:23:56,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:56,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:56,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:56,271 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:56,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:56,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=190 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:56,423 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:56,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-11-20T15:23:56,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:56,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:56,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:56,424 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:56,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:56,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=190 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:56,428 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=416, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/f3472c3c7dae45c49c4b1eaf3accfe17 2024-11-20T15:23:56,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/8370aa1d5af846a390a8973de729d1e1 is 50, key is test_row_0/B:col10/1732116235591/Put/seqid=0 2024-11-20T15:23:56,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742566_1742 (size=12301) 2024-11-20T15:23:56,437 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/8370aa1d5af846a390a8973de729d1e1 2024-11-20T15:23:56,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/11d731c39bf64498a0dee06b47e36eaf is 50, key is test_row_0/C:col10/1732116235591/Put/seqid=0 2024-11-20T15:23:56,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742567_1743 (size=12301) 2024-11-20T15:23:56,576 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 
2024-11-20T15:23:56,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-11-20T15:23:56,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:56,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. as already flushing 2024-11-20T15:23:56,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:56,576 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:56,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T15:23:56,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=190 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:56,728 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:56,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-11-20T15:23:56,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:56,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
as already flushing 2024-11-20T15:23:56,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:23:56,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:56,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190 java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:56,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4114): Remote procedure failed, pid=190 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T15:23:56,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:56,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35576 deadline: 1732116296746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:56,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:56,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35594 deadline: 1732116296747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:56,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T15:23:56,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33387 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35570 deadline: 1732116296750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 2024-11-20T15:23:56,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-11-20T15:23:56,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/11d731c39bf64498a0dee06b47e36eaf 2024-11-20T15:23:56,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/f3472c3c7dae45c49c4b1eaf3accfe17 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/f3472c3c7dae45c49c4b1eaf3accfe17 2024-11-20T15:23:56,853 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/f3472c3c7dae45c49c4b1eaf3accfe17, entries=150, sequenceid=416, filesize=30.5 K 2024-11-20T15:23:56,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/8370aa1d5af846a390a8973de729d1e1 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/8370aa1d5af846a390a8973de729d1e1 2024-11-20T15:23:56,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/8370aa1d5af846a390a8973de729d1e1, entries=150, sequenceid=416, filesize=12.0 K 2024-11-20T15:23:56,858 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/11d731c39bf64498a0dee06b47e36eaf as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/11d731c39bf64498a0dee06b47e36eaf 2024-11-20T15:23:56,861 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/11d731c39bf64498a0dee06b47e36eaf, entries=150, sequenceid=416, filesize=12.0 K 2024-11-20T15:23:56,862 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for b9a29e8e077d15da0686c3415c44e050 in 1270ms, sequenceid=416, compaction requested=false 2024-11-20T15:23:56,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:56,881 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:23:56,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33387 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-11-20T15:23:56,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
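The RegionTooBusyException above ("Over memstore limit=512.0 K") is the region server pushing back on writers while MemStoreFlusher.0 commits the A/B/C flush files. A minimal client-side sketch of the kind of write the test issues and of handling that pushback explicitly; the class name, column values and retry policy are assumptions, and in practice the stock HBase client already retries this exception on its own:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // One row, one cell per column family, mirroring the A/B/C stores flushed above.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      for (String family : new String[] { "A", "B", "C" }) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      }
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          // Thrown while the memstore is over its blocking limit; depending on client
          // retry settings it may also arrive wrapped by the client's retry machinery
          // rather than directly.
          if (attempt >= 5) throw e;
          Thread.sleep(100L * attempt); // simple linear backoff (assumed policy)
        }
      }
    }
  }
}

Whether this ever becomes visible at the application level depends on hbase.client.retries.number and the server-side memstore blocking settings; in this run the writers simply ride out the flush, as the later "wrote ..." totals show.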
2024-11-20T15:23:56,882 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T15:23:56,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:23:56,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:56,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:23:56,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:56,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:23:56,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:23:56,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a39931fa16814e149cdb44f355fbf27e_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_0/A:col10/1732116235626/Put/seqid=0 2024-11-20T15:23:56,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742568_1744 (size=12454) 2024-11-20T15:23:57,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:23:57,295 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a39931fa16814e149cdb44f355fbf27e_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a39931fa16814e149cdb44f355fbf27e_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:23:57,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/ffcd8376caf14e42a5653a67878710d1, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:23:57,296 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/ffcd8376caf14e42a5653a67878710d1 is 175, key is test_row_0/A:col10/1732116235626/Put/seqid=0 2024-11-20T15:23:57,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742569_1745 (size=31255) 2024-11-20T15:23:57,301 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=439, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/ffcd8376caf14e42a5653a67878710d1 2024-11-20T15:23:57,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/ddcca627044b41389c34557a30a579f5 is 50, key is test_row_0/B:col10/1732116235626/Put/seqid=0 2024-11-20T15:23:57,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742570_1746 (size=12301) 2024-11-20T15:23:57,710 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/ddcca627044b41389c34557a30a579f5 2024-11-20T15:23:57,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/73920372b0f64b1aaa236eb4eb2f9c75 is 50, key is test_row_0/C:col10/1732116235626/Put/seqid=0 2024-11-20T15:23:57,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742571_1747 (size=12301) 2024-11-20T15:23:57,721 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/73920372b0f64b1aaa236eb4eb2f9c75 2024-11-20T15:23:57,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/ffcd8376caf14e42a5653a67878710d1 as 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ffcd8376caf14e42a5653a67878710d1 2024-11-20T15:23:57,727 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ffcd8376caf14e42a5653a67878710d1, entries=150, sequenceid=439, filesize=30.5 K 2024-11-20T15:23:57,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/ddcca627044b41389c34557a30a579f5 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ddcca627044b41389c34557a30a579f5 2024-11-20T15:23:57,731 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ddcca627044b41389c34557a30a579f5, entries=150, sequenceid=439, filesize=12.0 K 2024-11-20T15:23:57,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/73920372b0f64b1aaa236eb4eb2f9c75 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/73920372b0f64b1aaa236eb4eb2f9c75 2024-11-20T15:23:57,734 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/73920372b0f64b1aaa236eb4eb2f9c75, entries=150, sequenceid=439, filesize=12.0 K 2024-11-20T15:23:57,735 INFO [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for b9a29e8e077d15da0686c3415c44e050 in 853ms, sequenceid=439, compaction requested=true 2024-11-20T15:23:57,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2538): Flush status journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:23:57,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
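Everything under pid=189/pid=190 above is the server side of a single table-wide flush request. A minimal sketch of the client call that produces it, using only the public Admin API (the class name and configuration source are assumptions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table. On this build the master
      // runs a FlushTableProcedure with one FlushRegionProcedure per region, which is
      // the pid=189 -> pid=190 pair traced in the entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

The matching client-side confirmation appears a little further down as HBaseAdmin$TableFuture "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 189 completed".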
2024-11-20T15:23:57,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0b62285ead89:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190 2024-11-20T15:23:57,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster(4106): Remote procedure done, pid=190 2024-11-20T15:23:57,737 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=190, resume processing ppid=189 2024-11-20T15:23:57,737 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=189, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0770 sec 2024-11-20T15:23:57,738 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees in 2.0820 sec 2024-11-20T15:23:57,751 DEBUG [Thread-2748 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e047c09 to 127.0.0.1:62338 2024-11-20T15:23:57,751 DEBUG [Thread-2748 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:57,755 DEBUG [Thread-2744 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06094c70 to 127.0.0.1:62338 2024-11-20T15:23:57,755 DEBUG [Thread-2744 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:57,755 DEBUG [Thread-2746 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x103dfc6e to 127.0.0.1:62338 2024-11-20T15:23:57,756 DEBUG [Thread-2746 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:23:57,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-11-20T15:23:57,762 INFO [Thread-2754 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 189 completed 2024-11-20T15:23:58,848 DEBUG [Thread-2750 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60d631a3 to 127.0.0.1:62338 2024-11-20T15:23:58,848 DEBUG [Thread-2750 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:24:02,418 DEBUG [Thread-2752 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58971172 to 127.0.0.1:62338 2024-11-20T15:24:02,418 DEBUG [Thread-2752 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:24:02,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T15:24:02,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 73 2024-11-20T15:24:02,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 55 2024-11-20T15:24:02,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-11-20T15:24:02,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 121 2024-11-20T15:24:02,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 48 2024-11-20T15:24:02,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T15:24:02,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6537 2024-11-20T15:24:02,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6259 2024-11-20T15:24:02,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6179 2024-11-20T15:24:02,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6533 2024-11-20T15:24:02,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6280 2024-11-20T15:24:02,418 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T15:24:02,418 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T15:24:02,418 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x537a66f8 to 127.0.0.1:62338 2024-11-20T15:24:02,418 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:24:02,419 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T15:24:02,419 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T15:24:02,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=191, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T15:24:02,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-20T15:24:02,422 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116242422"}]},"ts":"1732116242422"} 2024-11-20T15:24:02,423 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T15:24:02,425 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T15:24:02,426 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T15:24:02,427 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b9a29e8e077d15da0686c3415c44e050, UNASSIGN}] 2024-11-20T15:24:02,429 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b9a29e8e077d15da0686c3415c44e050, UNASSIGN 2024-11-20T15:24:02,429 INFO [PEWorker-1 
{}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=b9a29e8e077d15da0686c3415c44e050, regionState=CLOSING, regionLocation=0b62285ead89,33387,1732116069954 2024-11-20T15:24:02,430 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T15:24:02,430 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=194, ppid=193, state=RUNNABLE; CloseRegionProcedure b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954}] 2024-11-20T15:24:02,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-20T15:24:02,581 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 0b62285ead89,33387,1732116069954 2024-11-20T15:24:02,582 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] handler.UnassignRegionHandler(124): Close b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:02,582 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T15:24:02,582 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1681): Closing b9a29e8e077d15da0686c3415c44e050, disabling compactions & flushes 2024-11-20T15:24:02,582 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:24:02,582 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:24:02,582 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. after waiting 0 ms 2024-11-20T15:24:02,582 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 
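The pid=191..194 chain above (DisableTableProcedure -> CloseTableRegionsProcedure -> TransitRegionStateProcedure -> CloseRegionProcedure) is the normal teardown path for the test table. A minimal sketch of the client call that starts it; the class name is assumed, and the delete step is only noted as the usual follow-up:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        // Blocks until the DisableTableProcedure finishes: the region is unassigned,
        // any remaining memstore data is flushed while closing, and hbase:meta moves
        // the table state from DISABLING to DISABLED.
        admin.disableTable(table);
      }
      // A full cleanup would typically continue with admin.deleteTable(table),
      // which also archives the remaining store files.
    }
  }
}

The close-time flush and the HFileArchiver moves that follow in the entries below are exactly what this synchronous disable waits on.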
2024-11-20T15:24:02,582 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(2837): Flushing b9a29e8e077d15da0686c3415c44e050 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T15:24:02,583 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=A 2024-11-20T15:24:02,583 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:24:02,583 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=B 2024-11-20T15:24:02,583 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:24:02,583 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b9a29e8e077d15da0686c3415c44e050, store=C 2024-11-20T15:24:02,583 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T15:24:02,589 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120af2700b2baf64a1490c2291689db65c4_b9a29e8e077d15da0686c3415c44e050 is 50, key is test_row_1/A:col10/1732116242417/Put/seqid=0 2024-11-20T15:24:02,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742572_1748 (size=9914) 2024-11-20T15:24:02,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-20T15:24:02,993 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T15:24:02,996 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120af2700b2baf64a1490c2291689db65c4_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120af2700b2baf64a1490c2291689db65c4_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:02,997 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/1d0cd6c4f5d84b6f9d0c3cfc200bad4f, store: [table=TestAcidGuarantees family=A region=b9a29e8e077d15da0686c3415c44e050] 2024-11-20T15:24:02,998 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/1d0cd6c4f5d84b6f9d0c3cfc200bad4f is 175, key is test_row_1/A:col10/1732116242417/Put/seqid=0 2024-11-20T15:24:03,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742573_1749 (size=22561) 2024-11-20T15:24:03,001 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=447, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/1d0cd6c4f5d84b6f9d0c3cfc200bad4f 2024-11-20T15:24:03,008 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/097d8a4d000f479aad507937ec8f10a0 is 50, key is test_row_1/B:col10/1732116242417/Put/seqid=0 2024-11-20T15:24:03,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742574_1750 (size=9857) 2024-11-20T15:24:03,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-20T15:24:03,411 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/097d8a4d000f479aad507937ec8f10a0 2024-11-20T15:24:03,431 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/36c3954dfd2a48379755c8f26b811b44 is 50, key is test_row_1/C:col10/1732116242417/Put/seqid=0 2024-11-20T15:24:03,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742575_1751 (size=9857) 2024-11-20T15:24:03,451 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/36c3954dfd2a48379755c8f26b811b44 2024-11-20T15:24:03,455 DEBUG 
[RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/A/1d0cd6c4f5d84b6f9d0c3cfc200bad4f as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/1d0cd6c4f5d84b6f9d0c3cfc200bad4f 2024-11-20T15:24:03,459 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/1d0cd6c4f5d84b6f9d0c3cfc200bad4f, entries=100, sequenceid=447, filesize=22.0 K 2024-11-20T15:24:03,460 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/B/097d8a4d000f479aad507937ec8f10a0 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/097d8a4d000f479aad507937ec8f10a0 2024-11-20T15:24:03,464 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/097d8a4d000f479aad507937ec8f10a0, entries=100, sequenceid=447, filesize=9.6 K 2024-11-20T15:24:03,465 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/.tmp/C/36c3954dfd2a48379755c8f26b811b44 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/36c3954dfd2a48379755c8f26b811b44 2024-11-20T15:24:03,469 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/36c3954dfd2a48379755c8f26b811b44, entries=100, sequenceid=447, filesize=9.6 K 2024-11-20T15:24:03,470 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for b9a29e8e077d15da0686c3415c44e050 in 888ms, sequenceid=447, compaction requested=true 2024-11-20T15:24:03,472 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/7a57795357a549d9a2fd0b799d9f63a3, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/dd117c7700024c0ea9aa3b0fcd391bf5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/774715f3adcd449c974531bd9d63dce4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/08135c9f4f5045eb808024472faab16e, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/a21ad136fe9a478ebe937e1a599d8255, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/47f2ad5ae41d4ad886fb113ad415bfd8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/8e0aa2d88527499fa3a07443352c44fa, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ac09a5e1893044dcb5ed373402f532b3, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/f52cd2c506d54eb58db048b806cf60f8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/46577b5ff4684cd395ae2955f51830d8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/6a7a8fef53124bb0a9142d69878d08f7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c9f2093150e2406e98960cb3036def06, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/166449c9a037499588eca55a8c870eb5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c3a3fa1d74844fa49c9a5a95e7112b21, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c0f04c54eea7410b8c0009fe8d517f9a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/069da517b5ee4b1088061ecc7a33e5e5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/8f903770642941c1b91dc1b28ec05f3c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/755cf64a8840410ba05841a1b270aa64, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/04e6cc82d6c249fca690a8ae73fd8027, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/d0651c53199f42b090045c168b77d943, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/62db193c898f45238a93dff9eed73a72, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/9e6979c9c066401a9c27612054d6ecd8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ff29c2b86ad54fda95e60f87e4e6904c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/cf630ba61d094ceea32f9897dcea2b81, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/7fe123a90bb540f28eb12dc842616cbb, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/1119739569c24c8e9929ac8b93a9e495, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/052ea90c32fb4997810700e8a9cd0df4] to archive 2024-11-20T15:24:03,473 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T15:24:03,476 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/7a57795357a549d9a2fd0b799d9f63a3 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/7a57795357a549d9a2fd0b799d9f63a3 2024-11-20T15:24:03,478 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/dd117c7700024c0ea9aa3b0fcd391bf5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/dd117c7700024c0ea9aa3b0fcd391bf5 2024-11-20T15:24:03,480 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/774715f3adcd449c974531bd9d63dce4 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/774715f3adcd449c974531bd9d63dce4 2024-11-20T15:24:03,482 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/08135c9f4f5045eb808024472faab16e to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/08135c9f4f5045eb808024472faab16e 2024-11-20T15:24:03,484 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/a21ad136fe9a478ebe937e1a599d8255 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/a21ad136fe9a478ebe937e1a599d8255 2024-11-20T15:24:03,487 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/47f2ad5ae41d4ad886fb113ad415bfd8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/47f2ad5ae41d4ad886fb113ad415bfd8 2024-11-20T15:24:03,488 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/8e0aa2d88527499fa3a07443352c44fa to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/8e0aa2d88527499fa3a07443352c44fa 2024-11-20T15:24:03,490 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ac09a5e1893044dcb5ed373402f532b3 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ac09a5e1893044dcb5ed373402f532b3 2024-11-20T15:24:03,492 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/f52cd2c506d54eb58db048b806cf60f8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/f52cd2c506d54eb58db048b806cf60f8 2024-11-20T15:24:03,494 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/46577b5ff4684cd395ae2955f51830d8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/46577b5ff4684cd395ae2955f51830d8 2024-11-20T15:24:03,495 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/6a7a8fef53124bb0a9142d69878d08f7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/6a7a8fef53124bb0a9142d69878d08f7 2024-11-20T15:24:03,499 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c9f2093150e2406e98960cb3036def06 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c9f2093150e2406e98960cb3036def06 2024-11-20T15:24:03,501 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/166449c9a037499588eca55a8c870eb5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/166449c9a037499588eca55a8c870eb5 2024-11-20T15:24:03,503 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c3a3fa1d74844fa49c9a5a95e7112b21 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c3a3fa1d74844fa49c9a5a95e7112b21 2024-11-20T15:24:03,504 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c0f04c54eea7410b8c0009fe8d517f9a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/c0f04c54eea7410b8c0009fe8d517f9a 2024-11-20T15:24:03,507 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/069da517b5ee4b1088061ecc7a33e5e5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/069da517b5ee4b1088061ecc7a33e5e5 2024-11-20T15:24:03,509 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/8f903770642941c1b91dc1b28ec05f3c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/8f903770642941c1b91dc1b28ec05f3c 2024-11-20T15:24:03,511 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/755cf64a8840410ba05841a1b270aa64 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/755cf64a8840410ba05841a1b270aa64 2024-11-20T15:24:03,513 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/04e6cc82d6c249fca690a8ae73fd8027 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/04e6cc82d6c249fca690a8ae73fd8027 2024-11-20T15:24:03,514 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/d0651c53199f42b090045c168b77d943 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/d0651c53199f42b090045c168b77d943 2024-11-20T15:24:03,515 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/62db193c898f45238a93dff9eed73a72 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/62db193c898f45238a93dff9eed73a72 2024-11-20T15:24:03,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/9e6979c9c066401a9c27612054d6ecd8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/9e6979c9c066401a9c27612054d6ecd8 2024-11-20T15:24:03,518 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ff29c2b86ad54fda95e60f87e4e6904c to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ff29c2b86ad54fda95e60f87e4e6904c 2024-11-20T15:24:03,520 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/cf630ba61d094ceea32f9897dcea2b81 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/cf630ba61d094ceea32f9897dcea2b81 2024-11-20T15:24:03,521 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/7fe123a90bb540f28eb12dc842616cbb to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/7fe123a90bb540f28eb12dc842616cbb 2024-11-20T15:24:03,523 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/1119739569c24c8e9929ac8b93a9e495 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/1119739569c24c8e9929ac8b93a9e495 2024-11-20T15:24:03,524 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/052ea90c32fb4997810700e8a9cd0df4 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/052ea90c32fb4997810700e8a9cd0df4 2024-11-20T15:24:03,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-20T15:24:03,528 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/6700ca8a7ae644b9a5dd8d2782484ab7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ab9495825b044a6a93c12b4cf7dc022a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/08ca299d87934a51be919577123e80b0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/1499798425a344f491d2463dd3be8d87, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/0bf42752e8e24994b45eaef4a8acd251, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/9a97685e32c741efbcb963ce6da24567, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/45c57cf3511642499ff4971e4e1060d4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/4d019217366b47d09037bb62dc28cea8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/8244469008ba4b408e84284c80396e96, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/f66613cb22f34b709b4f6939d315cf01, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/85b7160fabce43babca87488befefe65, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/3ef0eb2ae314437db15a8ba5f1748c34, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/f7977095500a43eb8ebf2bc8f9b40c6c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/e45a00b04c5344b7a6f0aaf04c0f89f7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/0332e5125b5c470a934b1ab256552569, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/636a23a06d664a5b91ac46e0dc4148e1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/b9c95357b09a424eb1e4f0d45b9dbf53, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ec57e53f7fbc4ee8b0dc26fbfaeb8a01, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/1f13bdcd5aef4ff4a94715e6f411db86, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/16dcb0787fae4a85b8469aa1fded9545, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/c3d569173da242fe8a86533a5b82bf87, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/b38871d1e0f24c5c91516b79f58f0ff8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ccdedebcf5b74e75b184ab309c5ceeb0, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/11e9ab7977f54349a303875027293d33, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ccba6a2652094e50988d20f429fa844a, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/6e0e6908679a4c8f83a4e0bbb05cabad, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/83bae3ae3c3f47a9ad9a8bed9c79f88f] to archive 2024-11-20T15:24:03,529 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T15:24:03,530 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/6700ca8a7ae644b9a5dd8d2782484ab7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/6700ca8a7ae644b9a5dd8d2782484ab7 2024-11-20T15:24:03,532 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ab9495825b044a6a93c12b4cf7dc022a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ab9495825b044a6a93c12b4cf7dc022a 2024-11-20T15:24:03,533 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/08ca299d87934a51be919577123e80b0 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/08ca299d87934a51be919577123e80b0 2024-11-20T15:24:03,534 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/1499798425a344f491d2463dd3be8d87 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/1499798425a344f491d2463dd3be8d87 2024-11-20T15:24:03,535 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/0bf42752e8e24994b45eaef4a8acd251 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/0bf42752e8e24994b45eaef4a8acd251 2024-11-20T15:24:03,538 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/9a97685e32c741efbcb963ce6da24567 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/9a97685e32c741efbcb963ce6da24567 2024-11-20T15:24:03,539 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/45c57cf3511642499ff4971e4e1060d4 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/45c57cf3511642499ff4971e4e1060d4 2024-11-20T15:24:03,540 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/4d019217366b47d09037bb62dc28cea8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/4d019217366b47d09037bb62dc28cea8 2024-11-20T15:24:03,542 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/8244469008ba4b408e84284c80396e96 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/8244469008ba4b408e84284c80396e96 2024-11-20T15:24:03,544 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/f66613cb22f34b709b4f6939d315cf01 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/f66613cb22f34b709b4f6939d315cf01 2024-11-20T15:24:03,545 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/85b7160fabce43babca87488befefe65 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/85b7160fabce43babca87488befefe65 2024-11-20T15:24:03,547 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/3ef0eb2ae314437db15a8ba5f1748c34 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/3ef0eb2ae314437db15a8ba5f1748c34 2024-11-20T15:24:03,548 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/f7977095500a43eb8ebf2bc8f9b40c6c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/f7977095500a43eb8ebf2bc8f9b40c6c 2024-11-20T15:24:03,549 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/e45a00b04c5344b7a6f0aaf04c0f89f7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/e45a00b04c5344b7a6f0aaf04c0f89f7 2024-11-20T15:24:03,550 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/0332e5125b5c470a934b1ab256552569 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/0332e5125b5c470a934b1ab256552569 2024-11-20T15:24:03,551 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/636a23a06d664a5b91ac46e0dc4148e1 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/636a23a06d664a5b91ac46e0dc4148e1 2024-11-20T15:24:03,553 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/b9c95357b09a424eb1e4f0d45b9dbf53 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/b9c95357b09a424eb1e4f0d45b9dbf53 2024-11-20T15:24:03,554 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ec57e53f7fbc4ee8b0dc26fbfaeb8a01 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ec57e53f7fbc4ee8b0dc26fbfaeb8a01 2024-11-20T15:24:03,555 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/1f13bdcd5aef4ff4a94715e6f411db86 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/1f13bdcd5aef4ff4a94715e6f411db86 2024-11-20T15:24:03,557 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/16dcb0787fae4a85b8469aa1fded9545 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/16dcb0787fae4a85b8469aa1fded9545 2024-11-20T15:24:03,558 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/c3d569173da242fe8a86533a5b82bf87 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/c3d569173da242fe8a86533a5b82bf87 2024-11-20T15:24:03,559 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/b38871d1e0f24c5c91516b79f58f0ff8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/b38871d1e0f24c5c91516b79f58f0ff8 2024-11-20T15:24:03,560 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ccdedebcf5b74e75b184ab309c5ceeb0 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ccdedebcf5b74e75b184ab309c5ceeb0 2024-11-20T15:24:03,562 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/11e9ab7977f54349a303875027293d33 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/11e9ab7977f54349a303875027293d33 2024-11-20T15:24:03,563 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ccba6a2652094e50988d20f429fa844a to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ccba6a2652094e50988d20f429fa844a 2024-11-20T15:24:03,564 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/6e0e6908679a4c8f83a4e0bbb05cabad to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/6e0e6908679a4c8f83a4e0bbb05cabad 2024-11-20T15:24:03,566 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/83bae3ae3c3f47a9ad9a8bed9c79f88f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/83bae3ae3c3f47a9ad9a8bed9c79f88f 2024-11-20T15:24:03,567 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/cf2f7dd159c1466bac96005fe3cf8406, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/1c70eed8fdf54539bda93d0d153595e0, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d232be41644240c4800a3bfea812ffe5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/dda5acad070e4cc78b08196a67a0f39c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d9ec84991f214d06aa891a2b014968c7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/769a8c312f814f13af2ef15d849f5b96, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/2212a25e1e6d4aa6a78cfcb3e88751e8, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/5a28ccb1e64e4cb386df1c37b73c63f2, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/1f3eda66c97c4598a102868c8707b000, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/e1c9698fe0a44000b830663d9255ad89, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/f464e768de334fc094ab6ee1a08f2ca5, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/c1e67c0aa4d2472e809c3e9fa2add2c4, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/2098147f60fd494d986e487bcf450bba, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/25153a7be7824be7953e39337adb28a7, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/41637198ebc14780af436bf1b494375c, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/146eae16457c46a89871dce8bfcb5a58, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6365ea60f74d4c7ea01224a97151d97f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/9d796ae710e148a69e58f91f63194903, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6b2d8d3ce017473488ab3058539c3097, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/e1242db91e2642b5a4d7414711ac3875, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/cdf8ea84c6704165b0eb81b9e4a93911, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/9fff94855230425a8a9e7e33c47d8fc1, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/697187f1f244468b9648b7c6cb9fd41f, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d811169b9246401b9b7ed2b18c8de703, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/036aad7f56e04523b518685f03667c4b, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/a4a0252644c842ab8688fa4dfca3945d, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6b33d4a9c25c46e4a1e3b64f4c5cbee5] to archive 2024-11-20T15:24:03,568 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T15:24:03,570 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/cf2f7dd159c1466bac96005fe3cf8406 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/cf2f7dd159c1466bac96005fe3cf8406 2024-11-20T15:24:03,572 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/1c70eed8fdf54539bda93d0d153595e0 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/1c70eed8fdf54539bda93d0d153595e0 2024-11-20T15:24:03,575 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d232be41644240c4800a3bfea812ffe5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d232be41644240c4800a3bfea812ffe5 2024-11-20T15:24:03,576 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/dda5acad070e4cc78b08196a67a0f39c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/dda5acad070e4cc78b08196a67a0f39c 2024-11-20T15:24:03,577 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d9ec84991f214d06aa891a2b014968c7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d9ec84991f214d06aa891a2b014968c7 2024-11-20T15:24:03,579 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/769a8c312f814f13af2ef15d849f5b96 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/769a8c312f814f13af2ef15d849f5b96 2024-11-20T15:24:03,580 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/2212a25e1e6d4aa6a78cfcb3e88751e8 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/2212a25e1e6d4aa6a78cfcb3e88751e8 2024-11-20T15:24:03,581 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/5a28ccb1e64e4cb386df1c37b73c63f2 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/5a28ccb1e64e4cb386df1c37b73c63f2 2024-11-20T15:24:03,583 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/1f3eda66c97c4598a102868c8707b000 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/1f3eda66c97c4598a102868c8707b000 2024-11-20T15:24:03,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/e1c9698fe0a44000b830663d9255ad89 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/e1c9698fe0a44000b830663d9255ad89 2024-11-20T15:24:03,587 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/f464e768de334fc094ab6ee1a08f2ca5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/f464e768de334fc094ab6ee1a08f2ca5 2024-11-20T15:24:03,589 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/c1e67c0aa4d2472e809c3e9fa2add2c4 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/c1e67c0aa4d2472e809c3e9fa2add2c4 2024-11-20T15:24:03,593 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/2098147f60fd494d986e487bcf450bba to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/2098147f60fd494d986e487bcf450bba 2024-11-20T15:24:03,594 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/25153a7be7824be7953e39337adb28a7 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/25153a7be7824be7953e39337adb28a7 2024-11-20T15:24:03,595 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/41637198ebc14780af436bf1b494375c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/41637198ebc14780af436bf1b494375c 2024-11-20T15:24:03,597 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/146eae16457c46a89871dce8bfcb5a58 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/146eae16457c46a89871dce8bfcb5a58 2024-11-20T15:24:03,598 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6365ea60f74d4c7ea01224a97151d97f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6365ea60f74d4c7ea01224a97151d97f 2024-11-20T15:24:03,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/9d796ae710e148a69e58f91f63194903 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/9d796ae710e148a69e58f91f63194903 2024-11-20T15:24:03,601 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6b2d8d3ce017473488ab3058539c3097 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6b2d8d3ce017473488ab3058539c3097 2024-11-20T15:24:03,602 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/e1242db91e2642b5a4d7414711ac3875 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/e1242db91e2642b5a4d7414711ac3875 2024-11-20T15:24:03,603 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/cdf8ea84c6704165b0eb81b9e4a93911 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/cdf8ea84c6704165b0eb81b9e4a93911 2024-11-20T15:24:03,605 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/9fff94855230425a8a9e7e33c47d8fc1 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/9fff94855230425a8a9e7e33c47d8fc1 2024-11-20T15:24:03,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/697187f1f244468b9648b7c6cb9fd41f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/697187f1f244468b9648b7c6cb9fd41f 2024-11-20T15:24:03,607 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d811169b9246401b9b7ed2b18c8de703 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/d811169b9246401b9b7ed2b18c8de703 2024-11-20T15:24:03,608 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/036aad7f56e04523b518685f03667c4b to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/036aad7f56e04523b518685f03667c4b 2024-11-20T15:24:03,609 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/a4a0252644c842ab8688fa4dfca3945d to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/a4a0252644c842ab8688fa4dfca3945d 2024-11-20T15:24:03,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6b33d4a9c25c46e4a1e3b64f4c5cbee5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/6b33d4a9c25c46e4a1e3b64f4c5cbee5 2024-11-20T15:24:03,616 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/recovered.edits/450.seqid, newMaxSeqId=450, maxSeqId=4 2024-11-20T15:24:03,616 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050. 2024-11-20T15:24:03,617 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1635): Region close journal for b9a29e8e077d15da0686c3415c44e050: 2024-11-20T15:24:03,618 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] handler.UnassignRegionHandler(170): Closed b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:03,618 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=b9a29e8e077d15da0686c3415c44e050, regionState=CLOSED 2024-11-20T15:24:03,621 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=194, resume processing ppid=193 2024-11-20T15:24:03,621 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=194, ppid=193, state=SUCCESS; CloseRegionProcedure b9a29e8e077d15da0686c3415c44e050, server=0b62285ead89,33387,1732116069954 in 1.1890 sec 2024-11-20T15:24:03,622 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=193, resume processing ppid=192 2024-11-20T15:24:03,622 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b9a29e8e077d15da0686c3415c44e050, UNASSIGN in 1.1940 sec 2024-11-20T15:24:03,624 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191 2024-11-20T15:24:03,624 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.1970 sec 2024-11-20T15:24:03,625 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732116243625"}]},"ts":"1732116243625"} 
2024-11-20T15:24:03,626 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T15:24:03,628 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T15:24:03,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.2100 sec 2024-11-20T15:24:04,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-20T15:24:04,526 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 191 completed 2024-11-20T15:24:04,526 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46835 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T15:24:04,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46835 {}] procedure2.ProcedureExecutor(1098): Stored pid=195, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:24:04,528 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=195, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:24:04,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-11-20T15:24:04,529 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=195, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:24:04,531 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,533 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C, FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/recovered.edits] 2024-11-20T15:24:04,535 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/1d0cd6c4f5d84b6f9d0c3cfc200bad4f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/1d0cd6c4f5d84b6f9d0c3cfc200bad4f 2024-11-20T15:24:04,536 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/7953937bb5e74e4c973f4b72a6b60656 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/7953937bb5e74e4c973f4b72a6b60656 2024-11-20T15:24:04,537 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/f3472c3c7dae45c49c4b1eaf3accfe17 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/f3472c3c7dae45c49c4b1eaf3accfe17 2024-11-20T15:24:04,538 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ffcd8376caf14e42a5653a67878710d1 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/A/ffcd8376caf14e42a5653a67878710d1 2024-11-20T15:24:04,540 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/097d8a4d000f479aad507937ec8f10a0 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/097d8a4d000f479aad507937ec8f10a0 2024-11-20T15:24:04,541 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/4d726ba99f4247aea6ca0e90dba7318c to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/4d726ba99f4247aea6ca0e90dba7318c 2024-11-20T15:24:04,542 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/8370aa1d5af846a390a8973de729d1e1 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/8370aa1d5af846a390a8973de729d1e1 2024-11-20T15:24:04,543 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ddcca627044b41389c34557a30a579f5 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/B/ddcca627044b41389c34557a30a579f5 2024-11-20T15:24:04,545 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/11d731c39bf64498a0dee06b47e36eaf to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/11d731c39bf64498a0dee06b47e36eaf 2024-11-20T15:24:04,546 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/36c3954dfd2a48379755c8f26b811b44 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/36c3954dfd2a48379755c8f26b811b44 2024-11-20T15:24:04,547 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/73920372b0f64b1aaa236eb4eb2f9c75 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/73920372b0f64b1aaa236eb4eb2f9c75 2024-11-20T15:24:04,548 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/db549c3fa58a4874a8f05c6b2a11cc6f to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/C/db549c3fa58a4874a8f05c6b2a11cc6f 2024-11-20T15:24:04,551 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/recovered.edits/450.seqid to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050/recovered.edits/450.seqid 2024-11-20T15:24:04,551 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/default/TestAcidGuarantees/b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,551 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T15:24:04,552 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T15:24:04,552 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T15:24:04,555 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201d331a0abb0a45fda0b01b1fe802af69_b9a29e8e077d15da0686c3415c44e050 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201d331a0abb0a45fda0b01b1fe802af69_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,556 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201eb02666e07c4d3a851a5a762702cbfd_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201eb02666e07c4d3a851a5a762702cbfd_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,557 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112020ae1e55ba144cde8c9354ae47254079_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112020ae1e55ba144cde8c9354ae47254079_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,558 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120414673be641a4b16aac88c04a35ec210_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120414673be641a4b16aac88c04a35ec210_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,559 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205676e0bbb9ff4f0ca0ed860779a9cb2c_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205676e0bbb9ff4f0ca0ed860779a9cb2c_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,560 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120574e468cf3344e1392c32fd1da40cc81_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120574e468cf3344e1392c32fd1da40cc81_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,561 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112059b79eb545dd4ca9935ac82a19b276e9_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112059b79eb545dd4ca9935ac82a19b276e9_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,562 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120807785b1ac2d48eeb04d19f17ca95a85_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120807785b1ac2d48eeb04d19f17ca95a85_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,563 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120846f5b1ce8614af7bc6e81e7c736a982_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120846f5b1ce8614af7bc6e81e7c736a982_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,564 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120932701eb3c58409ea6ef8618881eb41d_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120932701eb3c58409ea6ef8618881eb41d_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,565 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120938caf3a8af24e9280f14f82cb8ec9e3_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120938caf3a8af24e9280f14f82cb8ec9e3_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,566 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120994557d4fd9f4be0bae74a9c99801ba6_b9a29e8e077d15da0686c3415c44e050 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120994557d4fd9f4be0bae74a9c99801ba6_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,567 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209cf3aee0781b4dc0b60641612e8ac651_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209cf3aee0781b4dc0b60641612e8ac651_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,568 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209df9ede376d14fb0ad2eb89de2372b63_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209df9ede376d14fb0ad2eb89de2372b63_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,569 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a12c9b1514624706924a8dc36a4eed6e_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a12c9b1514624706924a8dc36a4eed6e_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,570 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a39931fa16814e149cdb44f355fbf27e_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a39931fa16814e149cdb44f355fbf27e_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,571 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a5143477ab7b4735ad6a60f7838252d7_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a5143477ab7b4735ad6a60f7838252d7_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,572 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aabe844f5f6b4c2a8e1036ee7c3fc997_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aabe844f5f6b4c2a8e1036ee7c3fc997_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,573 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120af2700b2baf64a1490c2291689db65c4_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120af2700b2baf64a1490c2291689db65c4_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,574 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bf54f799058c4f30920713a846057601_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bf54f799058c4f30920713a846057601_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,575 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c5461348f0c649f0bdc446de3fc88290_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c5461348f0c649f0bdc446de3fc88290_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,576 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dfa4ed779ac64fec9709d3abb511185f_b9a29e8e077d15da0686c3415c44e050 to hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dfa4ed779ac64fec9709d3abb511185f_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,577 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f516a2ff5ee24cb69e32b9df8ef86094_b9a29e8e077d15da0686c3415c44e050 to 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f516a2ff5ee24cb69e32b9df8ef86094_b9a29e8e077d15da0686c3415c44e050 2024-11-20T15:24:04,578 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T15:24:04,580 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=195, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:24:04,582 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T15:24:04,584 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T15:24:04,585 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=195, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:24:04,585 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T15:24:04,585 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732116244585"}]},"ts":"9223372036854775807"} 2024-11-20T15:24:04,587 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T15:24:04,587 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => b9a29e8e077d15da0686c3415c44e050, NAME => 'TestAcidGuarantees,,1732116213657.b9a29e8e077d15da0686c3415c44e050.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T15:24:04,587 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
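The archive and META-update entries above trace the master-side DeleteTableProcedure for TestAcidGuarantees: MOB store files are moved into the archive directory, the region row is deleted from hbase:meta, and the table descriptor and region states are removed. On the client side this sequence is normally triggered by a disable-then-delete through the Admin API. A minimal, illustrative Java sketch follows (the class name and configuration source are assumptions, not taken from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();          // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(table)) {
            admin.disableTable(table);                              // a table must be disabled before deletion
            admin.deleteTable(table);                               // submits the DeleteTableProcedure on the master
          }
        }
      }
    }

The blocking deleteTable call returns once the master-side procedure finishes, which corresponds to the "Operation: DELETE ... procId: 195 completed" entry below.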
2024-11-20T15:24:04,587 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732116244587"}]},"ts":"9223372036854775807"} 2024-11-20T15:24:04,588 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T15:24:04,591 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=195, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T15:24:04,592 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=195, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 65 msec 2024-11-20T15:24:04,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46835 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-11-20T15:24:04,630 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 195 completed 2024-11-20T15:24:04,641 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=237 (was 238), OpenFileDescriptor=451 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=659 (was 690), ProcessCount=11 (was 11), AvailableMemoryMB=5455 (was 5518) 2024-11-20T15:24:04,641 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-20T15:24:04,641 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T15:24:04,641 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e83c466 to 127.0.0.1:62338 2024-11-20T15:24:04,641 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:24:04,642 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T15:24:04,642 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=633883069, stopped=false 2024-11-20T15:24:04,642 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=0b62285ead89,46835,1732116069247 2024-11-20T15:24:04,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T15:24:04,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T15:24:04,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T15:24:04,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T15:24:04,645 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-20T15:24:04,645 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Set 
watcher on znode that does not yet exist, /hbase/running 2024-11-20T15:24:04,646 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T15:24:04,647 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:24:04,647 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '0b62285ead89,33387,1732116069954' ***** 2024-11-20T15:24:04,647 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-20T15:24:04,648 INFO [RS:0;0b62285ead89:33387 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T15:24:04,648 INFO [RS:0;0b62285ead89:33387 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T15:24:04,648 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-20T15:24:04,648 INFO [RS:0;0b62285ead89:33387 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T15:24:04,648 INFO [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(3579): Received CLOSE for dc4fbec7dbed2ad8a83ee5514bc45c4e 2024-11-20T15:24:04,649 INFO [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(1224): stopping server 0b62285ead89,33387,1732116069954 2024-11-20T15:24:04,649 DEBUG [RS:0;0b62285ead89:33387 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:24:04,649 INFO [RS:0;0b62285ead89:33387 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T15:24:04,649 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing dc4fbec7dbed2ad8a83ee5514bc45c4e, disabling compactions & flushes 2024-11-20T15:24:04,649 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e. 2024-11-20T15:24:04,649 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e. 2024-11-20T15:24:04,649 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e. after waiting 0 ms 2024-11-20T15:24:04,649 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e. 2024-11-20T15:24:04,649 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing dc4fbec7dbed2ad8a83ee5514bc45c4e 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-20T15:24:04,650 INFO [RS:0;0b62285ead89:33387 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T15:24:04,650 INFO [RS:0;0b62285ead89:33387 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-20T15:24:04,650 INFO [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-20T15:24:04,650 INFO [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-20T15:24:04,650 DEBUG [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, dc4fbec7dbed2ad8a83ee5514bc45c4e=hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e.} 2024-11-20T15:24:04,650 DEBUG [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T15:24:04,651 INFO [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T15:24:04,651 DEBUG [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T15:24:04,651 DEBUG [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T15:24:04,651 DEBUG [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T15:24:04,651 INFO [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-20T15:24:04,651 DEBUG [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, dc4fbec7dbed2ad8a83ee5514bc45c4e 2024-11-20T15:24:04,663 INFO [regionserver/0b62285ead89:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T15:24:04,676 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/namespace/dc4fbec7dbed2ad8a83ee5514bc45c4e/.tmp/info/e5955f72577c401c826c0dd4af1d2136 is 45, key is default/info:d/1732116075292/Put/seqid=0 2024-11-20T15:24:04,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742576_1752 (size=5037) 2024-11-20T15:24:04,681 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/namespace/dc4fbec7dbed2ad8a83ee5514bc45c4e/.tmp/info/e5955f72577c401c826c0dd4af1d2136 2024-11-20T15:24:04,688 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/namespace/dc4fbec7dbed2ad8a83ee5514bc45c4e/.tmp/info/e5955f72577c401c826c0dd4af1d2136 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/namespace/dc4fbec7dbed2ad8a83ee5514bc45c4e/info/e5955f72577c401c826c0dd4af1d2136 2024-11-20T15:24:04,692 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/namespace/dc4fbec7dbed2ad8a83ee5514bc45c4e/info/e5955f72577c401c826c0dd4af1d2136, entries=2, sequenceid=6, filesize=4.9 K 2024-11-20T15:24:04,693 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for dc4fbec7dbed2ad8a83ee5514bc45c4e in 44ms, sequenceid=6, compaction requested=false 2024-11-20T15:24:04,704 DEBUG [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/.tmp/info/57dd52996d054c8a8a6fe65283221bc9 is 143, key is hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e./info:regioninfo/1732116075174/Put/seqid=0 2024-11-20T15:24:04,708 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/namespace/dc4fbec7dbed2ad8a83ee5514bc45c4e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T15:24:04,709 INFO [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e. 2024-11-20T15:24:04,709 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for dc4fbec7dbed2ad8a83ee5514bc45c4e: 2024-11-20T15:24:04,709 DEBUG [RS_CLOSE_REGION-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732116073943.dc4fbec7dbed2ad8a83ee5514bc45c4e. 
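The entries above show the close-time flush of the hbase:namespace region: the memstore is written to a temporary HFile under the region's .tmp directory, committed (renamed) into the info family directory, and a recovered.edits seqid marker is written before the region is closed. The same kind of flush can also be requested explicitly through the Admin API; a minimal sketch follows, assuming an already-open Connection (the helper class and parameter names are illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public final class FlushSketch {
      // 'conn' is assumed to be an existing Connection to the running cluster.
      public static void flushTable(Connection conn, String tableName) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf(tableName));   // persists current memstore contents as new HFiles
        }
      }
    }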
2024-11-20T15:24:04,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742577_1753 (size=7725) 2024-11-20T15:24:04,851 DEBUG [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T15:24:05,052 DEBUG [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T15:24:05,121 INFO [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/.tmp/info/57dd52996d054c8a8a6fe65283221bc9 2024-11-20T15:24:05,149 DEBUG [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/.tmp/rep_barrier/29c9c8961ba24faa910d34514306d731 is 102, key is TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557./rep_barrier:/1732116106513/DeleteFamily/seqid=0 2024-11-20T15:24:05,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742578_1754 (size=6025) 2024-11-20T15:24:05,153 INFO [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/.tmp/rep_barrier/29c9c8961ba24faa910d34514306d731 2024-11-20T15:24:05,172 DEBUG [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/.tmp/table/48e62abcbcc14eb191a41773f5249860 is 96, key is TestAcidGuarantees,,1732116075487.d2e9d0b7937c41ae63a82116ea698557./table:/1732116106513/DeleteFamily/seqid=0 2024-11-20T15:24:05,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742579_1755 (size=5942) 2024-11-20T15:24:05,252 DEBUG [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T15:24:05,452 DEBUG [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T15:24:05,576 INFO [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/.tmp/table/48e62abcbcc14eb191a41773f5249860 2024-11-20T15:24:05,580 DEBUG [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/.tmp/info/57dd52996d054c8a8a6fe65283221bc9 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/info/57dd52996d054c8a8a6fe65283221bc9 2024-11-20T15:24:05,583 INFO [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/info/57dd52996d054c8a8a6fe65283221bc9, entries=22, sequenceid=93, filesize=7.5 K 2024-11-20T15:24:05,584 DEBUG [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/.tmp/rep_barrier/29c9c8961ba24faa910d34514306d731 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/rep_barrier/29c9c8961ba24faa910d34514306d731 2024-11-20T15:24:05,585 INFO [regionserver/0b62285ead89:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T15:24:05,585 INFO [regionserver/0b62285ead89:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T15:24:05,586 INFO [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/rep_barrier/29c9c8961ba24faa910d34514306d731, entries=6, sequenceid=93, filesize=5.9 K 2024-11-20T15:24:05,587 DEBUG [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/.tmp/table/48e62abcbcc14eb191a41773f5249860 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/table/48e62abcbcc14eb191a41773f5249860 2024-11-20T15:24:05,590 INFO [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/table/48e62abcbcc14eb191a41773f5249860, entries=9, sequenceid=93, filesize=5.8 K 2024-11-20T15:24:05,590 INFO [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 939ms, sequenceid=93, compaction requested=false 2024-11-20T15:24:05,594 DEBUG [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-20T15:24:05,595 DEBUG [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T15:24:05,595 INFO [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T15:24:05,595 DEBUG [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T15:24:05,595 DEBUG [RS_CLOSE_META-regionserver/0b62285ead89:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T15:24:05,652 INFO [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(1250): stopping 
server 0b62285ead89,33387,1732116069954; all regions closed. 2024-11-20T15:24:05,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741834_1010 (size=26050) 2024-11-20T15:24:05,659 DEBUG [RS:0;0b62285ead89:33387 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/oldWALs 2024-11-20T15:24:05,659 INFO [RS:0;0b62285ead89:33387 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 0b62285ead89%2C33387%2C1732116069954.meta:.meta(num 1732116073698) 2024-11-20T15:24:05,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741832_1008 (size=17503768) 2024-11-20T15:24:05,664 DEBUG [RS:0;0b62285ead89:33387 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/oldWALs 2024-11-20T15:24:05,664 INFO [RS:0;0b62285ead89:33387 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 0b62285ead89%2C33387%2C1732116069954:(num 1732116072720) 2024-11-20T15:24:05,664 DEBUG [RS:0;0b62285ead89:33387 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:24:05,664 INFO [RS:0;0b62285ead89:33387 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T15:24:05,664 INFO [RS:0;0b62285ead89:33387 {}] hbase.ChoreService(370): Chore service for: regionserver/0b62285ead89:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-20T15:24:05,664 INFO [RS:0;0b62285ead89:33387 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33387 2024-11-20T15:24:05,664 INFO [regionserver/0b62285ead89:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-11-20T15:24:05,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0b62285ead89,33387,1732116069954 2024-11-20T15:24:05,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T15:24:05,671 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0b62285ead89,33387,1732116069954] 2024-11-20T15:24:05,671 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 0b62285ead89,33387,1732116069954; numProcessing=1 2024-11-20T15:24:05,673 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/0b62285ead89,33387,1732116069954 already deleted, retry=false 2024-11-20T15:24:05,673 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 0b62285ead89,33387,1732116069954 expired; onlineServers=0 2024-11-20T15:24:05,674 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '0b62285ead89,46835,1732116069247' ***** 2024-11-20T15:24:05,674 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T15:24:05,674 DEBUG [M:0;0b62285ead89:46835 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41d66913, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0b62285ead89/172.17.0.2:0 2024-11-20T15:24:05,674 INFO [M:0;0b62285ead89:46835 {}] regionserver.HRegionServer(1224): stopping server 0b62285ead89,46835,1732116069247 2024-11-20T15:24:05,674 INFO [M:0;0b62285ead89:46835 {}] regionserver.HRegionServer(1250): stopping server 0b62285ead89,46835,1732116069247; all regions closed. 2024-11-20T15:24:05,674 DEBUG [M:0;0b62285ead89:46835 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T15:24:05,674 DEBUG [M:0;0b62285ead89:46835 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T15:24:05,674 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T15:24:05,674 DEBUG [M:0;0b62285ead89:46835 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T15:24:05,674 DEBUG [master/0b62285ead89:0:becomeActiveMaster-HFileCleaner.large.0-1732116072455 {}] cleaner.HFileCleaner(306): Exit Thread[master/0b62285ead89:0:becomeActiveMaster-HFileCleaner.large.0-1732116072455,5,FailOnTimeoutGroup] 2024-11-20T15:24:05,674 INFO [M:0;0b62285ead89:46835 {}] hbase.ChoreService(370): Chore service for: master/0b62285ead89:0 had [] on shutdown 2024-11-20T15:24:05,675 DEBUG [M:0;0b62285ead89:46835 {}] master.HMaster(1733): Stopping service threads 2024-11-20T15:24:05,675 INFO [M:0;0b62285ead89:46835 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T15:24:05,675 DEBUG [master/0b62285ead89:0:becomeActiveMaster-HFileCleaner.small.0-1732116072456 {}] cleaner.HFileCleaner(306): Exit Thread[master/0b62285ead89:0:becomeActiveMaster-HFileCleaner.small.0-1732116072456,5,FailOnTimeoutGroup] 2024-11-20T15:24:05,675 ERROR [M:0;0b62285ead89:46835 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:40559 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:40559,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-20T15:24:05,676 INFO [M:0;0b62285ead89:46835 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T15:24:05,676 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T15:24:05,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T15:24:05,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T15:24:05,677 DEBUG [M:0;0b62285ead89:46835 {}] zookeeper.ZKUtil(347): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T15:24:05,677 WARN [M:0;0b62285ead89:46835 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T15:24:05,677 INFO [M:0;0b62285ead89:46835 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-20T15:24:05,677 INFO [M:0;0b62285ead89:46835 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T15:24:05,678 DEBUG [M:0;0b62285ead89:46835 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T15:24:05,678 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T15:24:05,678 INFO [M:0;0b62285ead89:46835 {}] regionserver.HRegion(1703): Closing region 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T15:24:05,678 DEBUG [M:0;0b62285ead89:46835 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T15:24:05,678 DEBUG [M:0;0b62285ead89:46835 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T15:24:05,678 DEBUG [M:0;0b62285ead89:46835 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T15:24:05,678 INFO [M:0;0b62285ead89:46835 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=814.55 KB heapSize=1004.59 KB 2024-11-20T15:24:05,701 DEBUG [M:0;0b62285ead89:46835 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dc51938326244ee585ad01603536a6cb is 82, key is hbase:meta,,1/info:regioninfo/1732116073833/Put/seqid=0 2024-11-20T15:24:05,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742580_1756 (size=5672) 2024-11-20T15:24:05,704 INFO [M:0;0b62285ead89:46835 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2341 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dc51938326244ee585ad01603536a6cb 2024-11-20T15:24:05,727 DEBUG [M:0;0b62285ead89:46835 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e4d5a1b3637c4677bcaf39c5374811c3 is 2279, key is \x00\x00\x00\x00\x00\x00\x00\xA8/proc:d/1732116215686/Put/seqid=0 2024-11-20T15:24:05,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742581_1757 (size=47881) 2024-11-20T15:24:05,738 INFO [M:0;0b62285ead89:46835 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=813.99 KB at sequenceid=2341 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e4d5a1b3637c4677bcaf39c5374811c3 2024-11-20T15:24:05,745 INFO [M:0;0b62285ead89:46835 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e4d5a1b3637c4677bcaf39c5374811c3 2024-11-20T15:24:05,768 DEBUG [M:0;0b62285ead89:46835 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aaaf7c39583e424081bd6e858869ca49 is 69, key is 0b62285ead89,33387,1732116069954/rs:state/1732116072483/Put/seqid=0 2024-11-20T15:24:05,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T15:24:05,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33387-0x100158571480001, quorum=127.0.0.1:62338, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T15:24:05,773 INFO [RS:0;0b62285ead89:33387 {}] regionserver.HRegionServer(1307): Exiting; stopping=0b62285ead89,33387,1732116069954; zookeeper connection closed. 2024-11-20T15:24:05,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073742582_1758 (size=5156) 2024-11-20T15:24:05,773 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1013815f {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1013815f 2024-11-20T15:24:05,773 INFO [M:0;0b62285ead89:46835 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2341 (bloomFilter=true), to=hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aaaf7c39583e424081bd6e858869ca49 2024-11-20T15:24:05,773 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T15:24:05,785 DEBUG [M:0;0b62285ead89:46835 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dc51938326244ee585ad01603536a6cb as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dc51938326244ee585ad01603536a6cb 2024-11-20T15:24:05,790 INFO [M:0;0b62285ead89:46835 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dc51938326244ee585ad01603536a6cb, entries=8, sequenceid=2341, filesize=5.5 K 2024-11-20T15:24:05,791 DEBUG [M:0;0b62285ead89:46835 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e4d5a1b3637c4677bcaf39c5374811c3 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e4d5a1b3637c4677bcaf39c5374811c3 2024-11-20T15:24:05,797 INFO [M:0;0b62285ead89:46835 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e4d5a1b3637c4677bcaf39c5374811c3 2024-11-20T15:24:05,797 INFO [M:0;0b62285ead89:46835 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e4d5a1b3637c4677bcaf39c5374811c3, entries=195, sequenceid=2341, filesize=46.8 K 2024-11-20T15:24:05,800 DEBUG [M:0;0b62285ead89:46835 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aaaf7c39583e424081bd6e858869ca49 as hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aaaf7c39583e424081bd6e858869ca49 2024-11-20T15:24:05,805 INFO [M:0;0b62285ead89:46835 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40559/user/jenkins/test-data/369b25e0-de1e-ba8f-f3ff-698c1aacdec3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aaaf7c39583e424081bd6e858869ca49, entries=1, sequenceid=2341, filesize=5.0 K 2024-11-20T15:24:05,805 INFO [M:0;0b62285ead89:46835 {}] regionserver.HRegion(3040): Finished flush of dataSize ~814.55 KB/834098, heapSize ~1004.29 KB/1028392, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=2341, compaction requested=false 2024-11-20T15:24:05,832 INFO [M:0;0b62285ead89:46835 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T15:24:05,832 DEBUG [M:0;0b62285ead89:46835 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T15:24:05,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35761 is added to blk_1073741830_1006 (size=987566) 2024-11-20T15:24:05,841 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-20T15:24:05,841 INFO [M:0;0b62285ead89:46835 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-11-20T15:24:05,841 INFO [M:0;0b62285ead89:46835 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46835 2024-11-20T15:24:05,844 DEBUG [M:0;0b62285ead89:46835 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/0b62285ead89,46835,1732116069247 already deleted, retry=false 2024-11-20T15:24:05,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T15:24:05,946 INFO [M:0;0b62285ead89:46835 {}] regionserver.HRegionServer(1307): Exiting; stopping=0b62285ead89,46835,1732116069247; zookeeper connection closed. 
2024-11-20T15:24:05,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46835-0x100158571480000, quorum=127.0.0.1:62338, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T15:24:05,970 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T15:24:05,973 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T15:24:05,975 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T15:24:05,976 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T15:24:05,976 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/hadoop.log.dir/,STOPPED} 2024-11-20T15:24:05,982 WARN [BP-1888144914-172.17.0.2-1732116066515 heartbeating to localhost/127.0.0.1:40559 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T15:24:05,982 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T15:24:05,982 WARN [BP-1888144914-172.17.0.2-1732116066515 heartbeating to localhost/127.0.0.1:40559 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1888144914-172.17.0.2-1732116066515 (Datanode Uuid e1101302-ca42-4c36-926f-c76325d0f3d3) service to localhost/127.0.0.1:40559 2024-11-20T15:24:05,982 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T15:24:05,985 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/cluster_20e01121-862a-f127-22c7-564b6246e54a/dfs/data/data1/current/BP-1888144914-172.17.0.2-1732116066515 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T15:24:05,986 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/cluster_20e01121-862a-f127-22c7-564b6246e54a/dfs/data/data2/current/BP-1888144914-172.17.0.2-1732116066515 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T15:24:05,987 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T15:24:06,000 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T15:24:06,001 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T15:24:06,002 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T15:24:06,002 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T15:24:06,002 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7df61be5-8075-aad9-f266-2b8470f21ce3/hadoop.log.dir/,STOPPED} 2024-11-20T15:24:06,050 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-11-20T15:24:06,282 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
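The final entries record the teardown performed after the last test: the region server and master are stopped, the DataNode and HDFS web contexts are shut down, the MiniZK cluster is stopped, and HBaseTestingUtility reports the minicluster as down. In test code this whole sequence is typically driven by a single shutdownMiniCluster() call in the class-level teardown. A minimal sketch, assuming JUnit 4 and hbase-testing-util on the classpath (the class name is illustrative):

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterLifecycleSketch {
      private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

      @BeforeClass
      public static void setUpCluster() throws Exception {
        TEST_UTIL.startMiniCluster(1);    // one master and one region server, matching this log
      }

      @AfterClass
      public static void tearDownCluster() throws Exception {
        TEST_UTIL.shutdownMiniCluster();  // stops HBase, then the MiniZK cluster and MiniDFS
      }
    }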